xref: /dflybsd-src/sys/dev/netif/bge/if_bge.c (revision 0a7869d85a6029677e388c6e46e3858fff21a0d7)
1 /*
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2001
4  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
34  * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.100 2008/07/06 09:17:12 sephe Exp $
35  *
36  */
37 
38 /*
39  * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Engineer, Wind River Systems
43  */
44 
45 /*
46  * The Broadcom BCM5700 is based on technology originally developed by
47  * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49  * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50  * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51  * frames, highly configurable RX filtering, and 16 RX and TX queues
52  * (which, along with RX filter rules, can be used for QOS applications).
53  * Other features, such as TCP segmentation, may be available as part
54  * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55  * firmware images can be stored in hardware and need not be compiled
56  * into the driver.
57  *
58  * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59  * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60  *
61  * The BCM5701 is a single-chip solution incorporating both the BCM5700
62  * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63  * does not support external SSRAM.
64  *
65  * Broadcom also produces a variation of the BCM5700 under the "Altima"
66  * brand name, which is functionally similar but lacks PCI-X support.
67  *
68  * Without external SSRAM, you can have at most 4 TX rings,
69  * and the use of the mini RX ring is disabled. This seems to imply
70  * that these features are simply not available on the BCM5701. As a
71  * result, this driver does not implement any support for the mini RX
72  * ring.
73  */
74 
75 #include "opt_polling.h"
76 #include "opt_ethernet.h"
77 
78 #include <sys/param.h>
79 #include <sys/bus.h>
80 #include <sys/endian.h>
81 #include <sys/kernel.h>
82 #include <sys/ktr.h>
83 #include <sys/interrupt.h>
84 #include <sys/mbuf.h>
85 #include <sys/malloc.h>
86 #include <sys/queue.h>
87 #include <sys/rman.h>
88 #include <sys/serialize.h>
89 #include <sys/socket.h>
90 #include <sys/sockio.h>
91 #include <sys/sysctl.h>
92 
93 #include <net/bpf.h>
94 #include <net/ethernet.h>
95 #include <net/if.h>
96 #include <net/if_arp.h>
97 #include <net/if_dl.h>
98 #include <net/if_media.h>
99 #include <net/if_types.h>
100 #include <net/ifq_var.h>
101 #include <net/vlan/if_vlan_var.h>
102 #include <net/vlan/if_vlan_ether.h>
103 
104 #include <dev/netif/mii_layer/mii.h>
105 #include <dev/netif/mii_layer/miivar.h>
106 #include <dev/netif/mii_layer/brgphyreg.h>
107 
108 #include <bus/pci/pcidevs.h>
109 #include <bus/pci/pcireg.h>
110 #include <bus/pci/pcivar.h>
111 
112 #include <dev/netif/bge/if_bgereg.h>
113 
114 /* "device miibus" required.  See GENERIC if you get errors here. */
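/*
 * For example (a sketch of the usual kernel configuration syntax):
 *
 *	device	miibus
 *	device	bge
 */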
115 #include "miibus_if.h"
116 
117 #define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
118 #define BGE_MIN_FRAME		60
119 
120 static const struct bge_type bge_devs[] = {
121 	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
122 		"3COM 3C996 Gigabit Ethernet" },
123 
124 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
125 		"Alteon BCM5700 Gigabit Ethernet" },
126 	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
127 		"Alteon BCM5701 Gigabit Ethernet" },
128 
129 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
130 		"Altima AC1000 Gigabit Ethernet" },
131 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
132 		"Altima AC1001 Gigabit Ethernet" },
133 	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
134 		"Altima AC9100 Gigabit Ethernet" },
135 
136 	{ PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701,
137 		"Apple BCM5701 Gigabit Ethernet" },
138 
139 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
140 		"Broadcom BCM5700 Gigabit Ethernet" },
141 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
142 		"Broadcom BCM5701 Gigabit Ethernet" },
143 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
144 		"Broadcom BCM5702 Gigabit Ethernet" },
145 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
146 		"Broadcom BCM5702X Gigabit Ethernet" },
147 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT,
148 		"Broadcom BCM5702 Gigabit Ethernet" },
149 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
150 		"Broadcom BCM5703 Gigabit Ethernet" },
151 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
152 		"Broadcom BCM5703X Gigabit Ethernet" },
153 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3,
154 		"Broadcom BCM5703 Gigabit Ethernet" },
155 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
156 		"Broadcom BCM5704C Dual Gigabit Ethernet" },
157 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
158 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
159 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT,
160 		"Broadcom BCM5704S Dual Gigabit Ethernet" },
161 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
162 		"Broadcom BCM5705 Gigabit Ethernet" },
163 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
164 		"Broadcom BCM5705F Gigabit Ethernet" },
165 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
166 		"Broadcom BCM5705K Gigabit Ethernet" },
167 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
168 		"Broadcom BCM5705M Gigabit Ethernet" },
169 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
170 		"Broadcom BCM5705M Gigabit Ethernet" },
171 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
172 		"Broadcom BCM5714C Gigabit Ethernet" },
173 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
174 		"Broadcom BCM5714S Gigabit Ethernet" },
175 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
176 		"Broadcom BCM5715 Gigabit Ethernet" },
177 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
178 		"Broadcom BCM5715S Gigabit Ethernet" },
179 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
180 		"Broadcom BCM5720 Gigabit Ethernet" },
181 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
182 		"Broadcom BCM5721 Gigabit Ethernet" },
183 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
184 		"Broadcom BCM5722 Gigabit Ethernet" },
185 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
186 		"Broadcom BCM5750 Gigabit Ethernet" },
187 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
188 		"Broadcom BCM5750M Gigabit Ethernet" },
189 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
190 		"Broadcom BCM5751 Gigabit Ethernet" },
191 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
192 		"Broadcom BCM5751F Gigabit Ethernet" },
193 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
194 		"Broadcom BCM5751M Gigabit Ethernet" },
195 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
196 		"Broadcom BCM5752 Gigabit Ethernet" },
197 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
198 		"Broadcom BCM5752M Gigabit Ethernet" },
199 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
200 		"Broadcom BCM5753 Gigabit Ethernet" },
201 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
202 		"Broadcom BCM5753F Gigabit Ethernet" },
203 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
204 		"Broadcom BCM5753M Gigabit Ethernet" },
205 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
206 		"Broadcom BCM5754 Gigabit Ethernet" },
207 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
208 		"Broadcom BCM5754M Gigabit Ethernet" },
209 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
210 		"Broadcom BCM5755 Gigabit Ethernet" },
211 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
212 		"Broadcom BCM5755M Gigabit Ethernet" },
213 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
214 		"Broadcom BCM5756 Gigabit Ethernet" },
215 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
216 		"Broadcom BCM5780 Gigabit Ethernet" },
217 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
218 		"Broadcom BCM5780S Gigabit Ethernet" },
219 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
220 		"Broadcom BCM5781 Gigabit Ethernet" },
221 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
222 		"Broadcom BCM5782 Gigabit Ethernet" },
223 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
224 		"Broadcom BCM5786 Gigabit Ethernet" },
225 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
226 		"Broadcom BCM5787 Gigabit Ethernet" },
227 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F,
228 		"Broadcom BCM5787F Gigabit Ethernet" },
229 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
230 		"Broadcom BCM5787M Gigabit Ethernet" },
231 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
232 		"Broadcom BCM5788 Gigabit Ethernet" },
233 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
234 		"Broadcom BCM5789 Gigabit Ethernet" },
235 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
236 		"Broadcom BCM5901 Fast Ethernet" },
237 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
238 		"Broadcom BCM5901A2 Fast Ethernet" },
239 	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
240 		"Broadcom BCM5903M Fast Ethernet" },
241 
242 	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
243 		"SysKonnect Gigabit Ethernet" },
244 
245 	{ 0, 0, NULL }
246 };
247 
248 #define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
249 #define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
250 #define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
251 #define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
252 #define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
253 
254 static int	bge_probe(device_t);
255 static int	bge_attach(device_t);
256 static int	bge_detach(device_t);
257 static void	bge_txeof(struct bge_softc *);
258 static void	bge_rxeof(struct bge_softc *);
259 
260 static void	bge_tick(void *);
261 static void	bge_stats_update(struct bge_softc *);
262 static void	bge_stats_update_regs(struct bge_softc *);
263 static int	bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
264 
265 #ifdef DEVICE_POLLING
266 static void	bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
267 #endif
268 static void	bge_intr(void *);
269 static void	bge_enable_intr(struct bge_softc *);
270 static void	bge_disable_intr(struct bge_softc *);
271 static void	bge_start(struct ifnet *);
272 static int	bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
273 static void	bge_init(void *);
274 static void	bge_stop(struct bge_softc *);
275 static void	bge_watchdog(struct ifnet *);
276 static void	bge_shutdown(device_t);
277 static int	bge_suspend(device_t);
278 static int	bge_resume(device_t);
279 static int	bge_ifmedia_upd(struct ifnet *);
280 static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
281 
282 static uint8_t	bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
283 static int	bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
284 
285 static void	bge_setmulti(struct bge_softc *);
286 static void	bge_setpromisc(struct bge_softc *);
287 
288 static int	bge_alloc_jumbo_mem(struct bge_softc *);
289 static void	bge_free_jumbo_mem(struct bge_softc *);
290 static struct bge_jslot
291 		*bge_jalloc(struct bge_softc *);
292 static void	bge_jfree(void *);
293 static void	bge_jref(void *);
294 static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
295 static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
296 static int	bge_init_rx_ring_std(struct bge_softc *);
297 static void	bge_free_rx_ring_std(struct bge_softc *);
298 static int	bge_init_rx_ring_jumbo(struct bge_softc *);
299 static void	bge_free_rx_ring_jumbo(struct bge_softc *);
300 static void	bge_free_tx_ring(struct bge_softc *);
301 static int	bge_init_tx_ring(struct bge_softc *);
302 
303 static int	bge_chipinit(struct bge_softc *);
304 static int	bge_blockinit(struct bge_softc *);
305 
306 static uint32_t	bge_readmem_ind(struct bge_softc *, uint32_t);
307 static void	bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
308 #ifdef notdef
309 static uint32_t	bge_readreg_ind(struct bge_softc *, uint32_t);
310 #endif
311 static void	bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
312 static void	bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t);
313 static void	bge_set_max_readrq(struct bge_softc *);
314 
315 static int	bge_miibus_readreg(device_t, int, int);
316 static int	bge_miibus_writereg(device_t, int, int, int);
317 static void	bge_miibus_statchg(device_t);
318 static void	bge_bcm5700_link_upd(struct bge_softc *, uint32_t);
319 static void	bge_tbi_link_upd(struct bge_softc *, uint32_t);
320 static void	bge_copper_link_upd(struct bge_softc *, uint32_t);
321 
322 static void	bge_reset(struct bge_softc *);
323 
324 static void	bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
325 static void	bge_dma_map_mbuf(void *, bus_dma_segment_t *, int,
326 				 bus_size_t, int);
327 static int	bge_dma_alloc(struct bge_softc *);
328 static void	bge_dma_free(struct bge_softc *);
329 static int	bge_dma_block_alloc(struct bge_softc *, bus_size_t,
330 				    bus_dma_tag_t *, bus_dmamap_t *,
331 				    void **, bus_addr_t *);
332 static void	bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *);
333 
334 static void	bge_coal_change(struct bge_softc *);
335 static int	bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS);
336 static int	bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS);
337 static int	bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS);
338 static int	bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS);
339 static int	bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, uint32_t);
340 
341 /*
342  * Set the following tunable to 1 for some IBM blade servers with the DNLK
343  * switch module. Auto negotiation is broken for those configurations.
344  */
345 static int	bge_fake_autoneg = 0;
346 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
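/*
 * For example, the workaround can be enabled from the loader, e.g. in
 * /boot/loader.conf (assuming the usual tunable mechanism):
 *
 *	hw.bge.fake_autoneg="1"
 */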
347 
348 /* Interrupt moderation control variables. */
349 static int	bge_rx_coal_ticks = 150;	/* usec */
350 static int	bge_tx_coal_ticks = 1023;	/* usec */
351 static int	bge_rx_max_coal_bds = 24;
352 static int	bge_tx_max_coal_bds = 128;
353 
354 TUNABLE_INT("hw.bge.rx_coal_ticks", &bge_rx_coal_ticks);
355 TUNABLE_INT("hw.bge.tx_coal_ticks", &bge_tx_coal_ticks);
356 TUNABLE_INT("hw.bge.rx_max_coal_bds", &bge_rx_max_coal_bds);
357 TUNABLE_INT("hw.bge.tx_max_coal_bds", &bge_tx_max_coal_bds);
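/*
 * Example /boot/loader.conf settings (a sketch; the values shown are
 * just the compiled-in defaults above):
 *
 *	hw.bge.rx_coal_ticks="150"
 *	hw.bge.tx_coal_ticks="1023"
 *	hw.bge.rx_max_coal_bds="24"
 *	hw.bge.tx_max_coal_bds="128"
 */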
358 
359 #if !defined(KTR_IF_BGE)
360 #define KTR_IF_BGE	KTR_ALL
361 #endif
362 KTR_INFO_MASTER(if_bge);
363 KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr", 0);
364 KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt", 0);
365 KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt", 0);
366 #define logif(name)	KTR_LOG(if_bge_ ## name)
367 
368 static device_method_t bge_methods[] = {
369 	/* Device interface */
370 	DEVMETHOD(device_probe,		bge_probe),
371 	DEVMETHOD(device_attach,	bge_attach),
372 	DEVMETHOD(device_detach,	bge_detach),
373 	DEVMETHOD(device_shutdown,	bge_shutdown),
374 	DEVMETHOD(device_suspend,	bge_suspend),
375 	DEVMETHOD(device_resume,	bge_resume),
376 
377 	/* bus interface */
378 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
379 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
380 
381 	/* MII interface */
382 	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
383 	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
384 	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),
385 
386 	{ 0, 0 }
387 };
388 
389 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
390 static devclass_t bge_devclass;
391 
392 DECLARE_DUMMY_MODULE(if_bge);
393 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
394 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
395 
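/*
 * Indirect access to NIC-internal memory: program the target offset
 * into the PCI memory window base address register, transfer a word
 * through the window data register, then reset the window.
 */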
396 static uint32_t
397 bge_readmem_ind(struct bge_softc *sc, uint32_t off)
398 {
399 	device_t dev = sc->bge_dev;
400 	uint32_t val;
401 
402 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
403 	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
404 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
405 	return (val);
406 }
407 
408 static void
409 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
410 {
411 	device_t dev = sc->bge_dev;
412 
413 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
414 	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
415 	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
416 }
417 
418 /*
419  * PCI Express only
420  */
421 static void
422 bge_set_max_readrq(struct bge_softc *sc)
423 {
424 	device_t dev = sc->bge_dev;
425 	uint16_t val;
426 	uint8_t expr_ptr;
427 
428 	KKASSERT((sc->bge_flags & BGE_FLAG_PCIE) && sc->bge_expr_ptr != 0);
429 	expr_ptr = sc->bge_expr_ptr;
430 
431 	val = pci_read_config(dev, expr_ptr + PCIER_DEVCTRL, 2);
432 	if ((val & PCIEM_DEVCTL_MAX_READRQ_MASK) !=
433 	    PCIEM_DEVCTL_MAX_READRQ_4096) {
434 		device_printf(dev, "adjust device control 0x%04x ", val);
435 
436 		val &= ~PCIEM_DEVCTL_MAX_READRQ_MASK;
437 		val |= PCIEM_DEVCTL_MAX_READRQ_4096;
438 		pci_write_config(dev, expr_ptr + PCIER_DEVCTRL, val, 2);
439 
440 		kprintf("-> 0x%04x\n", val);
441 	}
442 }
443 
444 #ifdef notdef
445 static uint32_t
446 bge_readreg_ind(struct bge_softc *sc, uint32_t off)
447 {
448 	device_t dev = sc->bge_dev;
449 
450 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
451 	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
452 }
453 #endif
454 
455 static void
456 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
457 {
458 	device_t dev = sc->bge_dev;
459 
460 	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
461 	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
462 }
463 
464 static void
465 bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
466 {
467 	CSR_WRITE_4(sc, off, val);
468 }
469 
470 /*
471  * Read a byte of data stored in the EEPROM at address 'addr.' The
472  * BCM570x supports both the traditional bitbang interface and an
473  * auto access interface for reading the EEPROM. We use the auto
474  * access method.
475  */
476 static uint8_t
477 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
478 {
479 	int i;
480 	uint32_t byte = 0;
481 
482 	/*
483 	 * Enable use of auto EEPROM access so we can avoid
484 	 * having to use the bitbang method.
485 	 */
486 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
487 
488 	/* Reset the EEPROM, load the clock period. */
489 	CSR_WRITE_4(sc, BGE_EE_ADDR,
490 	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
491 	DELAY(20);
492 
493 	/* Issue the read EEPROM command. */
494 	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
495 
496 	/* Wait for completion */
497 	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
498 		DELAY(10);
499 		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
500 			break;
501 	}
502 
503 	if (i == BGE_TIMEOUT * 10) {
504 		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
505 		return(1);
506 	}
507 
508 	/* Get result. */
509 	byte = CSR_READ_4(sc, BGE_EE_DATA);
510 
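	/*
	 * The EEPROM data register returns a 32-bit word; extract the
	 * byte lane selected by the low two bits of the address.
	 */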
511 	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;
512 
513 	return(0);
514 }
515 
516 /*
517  * Read a sequence of bytes from the EEPROM.
518  */
519 static int
520 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
521 {
522 	size_t i;
523 	int err;
524 	uint8_t byte;
525 
526 	for (byte = 0, err = 0, i = 0; i < len; i++) {
527 		err = bge_eeprom_getbyte(sc, off + i, &byte);
528 		if (err)
529 			break;
530 		*(dest + i) = byte;
531 	}
532 
533 	return(err ? 1 : 0);
534 }
535 
536 static int
537 bge_miibus_readreg(device_t dev, int phy, int reg)
538 {
539 	struct bge_softc *sc = device_get_softc(dev);
540 	struct ifnet *ifp = &sc->arpcom.ac_if;
541 	uint32_t val, autopoll;
542 	int i;
543 
544 	/*
545 	 * Broadcom's own driver always assumes the internal
546 	 * PHY is at GMII address 1. On some chips, the PHY responds
547 	 * to accesses at all addresses, which could cause us to
548  * bogusly attach the PHY 32 times at probe time. Always
549  * restricting the lookup to address 1 is simpler than
550  * trying to figure out which chip revisions should be
551 	 * special-cased.
552 	 */
553 	if (phy != 1)
554 		return(0);
555 
556 	/* Reading with autopolling on may trigger PCI errors */
557 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
558 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
559 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
560 		DELAY(40);
561 	}
562 
563 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
564 	    BGE_MIPHY(phy)|BGE_MIREG(reg));
565 
566 	for (i = 0; i < BGE_TIMEOUT; i++) {
567 		DELAY(10);
568 		val = CSR_READ_4(sc, BGE_MI_COMM);
569 		if (!(val & BGE_MICOMM_BUSY))
570 			break;
571 	}
572 
573 	if (i == BGE_TIMEOUT) {
574 		if_printf(ifp, "PHY read timed out "
575 			  "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val);
576 		val = 0;
577 		goto done;
578 	}
579 
580 	DELAY(5);
581 	val = CSR_READ_4(sc, BGE_MI_COMM);
582 
583 done:
584 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
585 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
586 		DELAY(40);
587 	}
588 
589 	if (val & BGE_MICOMM_READFAIL)
590 		return(0);
591 
592 	return(val & 0xFFFF);
593 }
594 
595 static int
596 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
597 {
598 	struct bge_softc *sc = device_get_softc(dev);
599 	uint32_t autopoll;
600 	int i;
601 
602 	/*
603 	 * See the related comment in bge_miibus_readreg()
604 	 */
605 	if (phy != 1)
606 		return(0);
607 
608 	/* Writing with autopolling on may trigger PCI errors */
609 	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
610 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
611 		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
612 		DELAY(40);
613 	}
614 
615 	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
616 	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
617 
618 	for (i = 0; i < BGE_TIMEOUT; i++) {
619 		DELAY(10);
620 		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
621 			DELAY(5);
622 			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
623 			break;
624 		}
625 	}
626 
627 	if (autopoll & BGE_MIMODE_AUTOPOLL) {
628 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
629 		DELAY(40);
630 	}
631 
632 	if (i == BGE_TIMEOUT) {
633 		if_printf(&sc->arpcom.ac_if, "PHY write timed out "
634 			  "(phy %d, reg %d, val %d)\n", phy, reg, val);
635 		return(0);
636 	}
637 
638 	return(0);
639 }
640 
641 static void
642 bge_miibus_statchg(device_t dev)
643 {
644 	struct bge_softc *sc;
645 	struct mii_data *mii;
646 
647 	sc = device_get_softc(dev);
648 	mii = device_get_softc(sc->bge_miibus);
649 
650 	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
651 	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
652 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
653 	} else {
654 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
655 	}
656 
657 	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
658 		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
659 	} else {
660 		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
661 	}
662 }
663 
664 /*
665  * Memory management for jumbo frames.
666  */
667 static int
668 bge_alloc_jumbo_mem(struct bge_softc *sc)
669 {
670 	struct ifnet *ifp = &sc->arpcom.ac_if;
671 	struct bge_jslot *entry;
672 	uint8_t *ptr;
673 	bus_addr_t paddr;
674 	int i, error;
675 
676 	/*
677 	 * Create tag for jumbo mbufs.
678 	 * This is really a bit of a kludge. We allocate a special
679 	 * jumbo buffer pool which (thanks to the way our DMA
680 	 * memory allocation works) will consist of contiguous
681 	 * pages. This means that even though a jumbo buffer might
682 	 * be larger than a page size, we don't really need to
683 	 * map it into more than one DMA segment. However, the
684 	 * default mbuf tag will result in multi-segment mappings,
685 	 * so we have to create a special jumbo mbuf tag that
686 	 * lets us get away with mapping the jumbo buffers as
687 	 * a single segment. I think eventually the driver should
688 	 * be changed so that it uses ordinary mbufs and cluster
689 	 * buffers, i.e. jumbo frames can span multiple DMA
690 	 * descriptors. But that's a project for another day.
691 	 */
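	/*
	 * The pool below is carved into BGE_JSLOTS buffers of BGE_JLEN
	 * bytes each (BGE_JMEM bytes in total), with each buffer
	 * tracked by a reference-counted bge_jslot entry.
	 */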
692 
693 	/*
694 	 * Create DMA structures for the jumbo RX ring.
695 	 */
696 	error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ,
697 				    &sc->bge_cdata.bge_rx_jumbo_ring_tag,
698 				    &sc->bge_cdata.bge_rx_jumbo_ring_map,
699 				    (void **)&sc->bge_ldata.bge_rx_jumbo_ring,
700 				    &sc->bge_ldata.bge_rx_jumbo_ring_paddr);
701 	if (error) {
702 		if_printf(ifp, "could not create jumbo RX ring\n");
703 		return error;
704 	}
705 
706 	/*
707 	 * Create DMA structures for the jumbo buffer block.
708 	 */
709 	error = bge_dma_block_alloc(sc, BGE_JMEM,
710 				    &sc->bge_cdata.bge_jumbo_tag,
711 				    &sc->bge_cdata.bge_jumbo_map,
712 				    (void **)&sc->bge_ldata.bge_jumbo_buf,
713 				    &paddr);
714 	if (error) {
715 		if_printf(ifp, "could not create jumbo buffer\n");
716 		return error;
717 	}
718 
719 	SLIST_INIT(&sc->bge_jfree_listhead);
720 
721 	/*
722 	 * Now divide it up into 9K pieces and save the addresses
723 	 * in an array. Note that we play an evil trick here by using
724 	 * the first few bytes in the buffer to hold the address
725 	 * of the softc structure for this interface. This is because
726 	 * bge_jfree() needs it, but it is called by the mbuf management
727 	 * code which will not pass it to us explicitly.
728 	 */
729 	for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) {
730 		entry = &sc->bge_cdata.bge_jslots[i];
731 		entry->bge_sc = sc;
732 		entry->bge_buf = ptr;
733 		entry->bge_paddr = paddr;
734 		entry->bge_inuse = 0;
735 		entry->bge_slot = i;
736 		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link);
737 
738 		ptr += BGE_JLEN;
739 		paddr += BGE_JLEN;
740 	}
741 	return 0;
742 }
743 
744 static void
745 bge_free_jumbo_mem(struct bge_softc *sc)
746 {
747 	/* Destroy jumbo RX ring. */
748 	bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
749 			   sc->bge_cdata.bge_rx_jumbo_ring_map,
750 			   sc->bge_ldata.bge_rx_jumbo_ring);
751 
752 	/* Destroy jumbo buffer block. */
753 	bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag,
754 			   sc->bge_cdata.bge_jumbo_map,
755 			   sc->bge_ldata.bge_jumbo_buf);
756 }
757 
758 /*
759  * Allocate a jumbo buffer.
760  */
761 static struct bge_jslot *
762 bge_jalloc(struct bge_softc *sc)
763 {
764 	struct bge_jslot *entry;
765 
766 	lwkt_serialize_enter(&sc->bge_jslot_serializer);
767 	entry = SLIST_FIRST(&sc->bge_jfree_listhead);
768 	if (entry) {
769 		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link);
770 		entry->bge_inuse = 1;
771 	} else {
772 		if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
773 	}
774 	lwkt_serialize_exit(&sc->bge_jslot_serializer);
775 	return(entry);
776 }
777 
778 /*
779  * Adjust usage count on a jumbo buffer.
780  */
781 static void
782 bge_jref(void *arg)
783 {
784 	struct bge_jslot *entry = (struct bge_jslot *)arg;
785 	struct bge_softc *sc = entry->bge_sc;
786 
787 	if (sc == NULL)
788 		panic("bge_jref: can't find softc pointer!");
789 
790 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
791 		panic("bge_jref: asked to reference buffer "
792 		    "that we don't manage!");
793 	} else if (entry->bge_inuse == 0) {
794 		panic("bge_jref: buffer already free!");
795 	} else {
796 		atomic_add_int(&entry->bge_inuse, 1);
797 	}
798 }
799 
800 /*
801  * Release a jumbo buffer.
802  */
803 static void
804 bge_jfree(void *arg)
805 {
806 	struct bge_jslot *entry = (struct bge_jslot *)arg;
807 	struct bge_softc *sc = entry->bge_sc;
808 
809 	if (sc == NULL)
810 		panic("bge_jfree: can't find softc pointer!");
811 
812 	if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) {
813 		panic("bge_jfree: asked to free buffer that we don't manage!");
814 	} else if (entry->bge_inuse == 0) {
815 		panic("bge_jfree: buffer already free!");
816 	} else {
817 		/*
818 		 * Possible MP race to 0, use the serializer.  The atomic insn
819 		 * is still needed for races against bge_jref().
820 		 */
821 		lwkt_serialize_enter(&sc->bge_jslot_serializer);
822 		atomic_subtract_int(&entry->bge_inuse, 1);
823 		if (entry->bge_inuse == 0) {
824 			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
825 					  entry, jslot_link);
826 		}
827 		lwkt_serialize_exit(&sc->bge_jslot_serializer);
828 	}
829 }
830 
831 
832 /*
833  * Initialize a standard receive ring descriptor.
834  */
835 static int
836 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
837 {
838 	struct mbuf *m_new = NULL;
839 	struct bge_dmamap_arg ctx;
840 	bus_dma_segment_t seg;
841 	struct bge_rx_bd *r;
842 	int error;
843 
844 	if (m == NULL) {
845 		m_new = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
846 		if (m_new == NULL)
847 			return ENOBUFS;
848 	} else {
849 		m_new = m;
850 		m_new->m_data = m_new->m_ext.ext_buf;
851 	}
852 	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
853 
854 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
855 		m_adj(m_new, ETHER_ALIGN);
856 
857 	ctx.bge_maxsegs = 1;
858 	ctx.bge_segs = &seg;
859 	error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag,
860 				     sc->bge_cdata.bge_rx_std_dmamap[i],
861 				     m_new, bge_dma_map_mbuf, &ctx,
862 				     BUS_DMA_NOWAIT);
863 	if (error || ctx.bge_maxsegs == 0) {
864 		if (m == NULL)
865 			m_freem(m_new);
866 		return ENOMEM;
867 	}
868 
869 	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
870 
871 	r = &sc->bge_ldata.bge_rx_std_ring[i];
872 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[0].ds_addr);
873 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[0].ds_addr);
874 	r->bge_flags = BGE_RXBDFLAG_END;
875 	r->bge_len = m_new->m_len;
876 	r->bge_idx = i;
877 
878 	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
879 			sc->bge_cdata.bge_rx_std_dmamap[i],
880 			BUS_DMASYNC_PREREAD);
881 	return 0;
882 }
883 
884 /*
885  * Initialize a jumbo receive ring descriptor. This allocates
886  * a jumbo buffer from the pool managed internally by the driver.
887  */
888 static int
889 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
890 {
891 	struct mbuf *m_new = NULL;
892 	struct bge_jslot *buf;
893 	struct bge_rx_bd *r;
894 	bus_addr_t paddr;
895 
896 	if (m == NULL) {
897 		/* Allocate the mbuf. */
898 		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
899 		if (m_new == NULL)
900 			return(ENOBUFS);
901 
902 		/* Allocate the jumbo buffer */
903 		buf = bge_jalloc(sc);
904 		if (buf == NULL) {
905 			m_freem(m_new);
906 			if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
907 			    "-- packet dropped!\n");
908 			return ENOBUFS;
909 		}
910 
911 		/* Attach the buffer to the mbuf. */
912 		m_new->m_ext.ext_arg = buf;
913 		m_new->m_ext.ext_buf = buf->bge_buf;
914 		m_new->m_ext.ext_free = bge_jfree;
915 		m_new->m_ext.ext_ref = bge_jref;
916 		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
917 
918 		m_new->m_flags |= M_EXT;
919 	} else {
920 		KKASSERT(m->m_flags & M_EXT);
921 		m_new = m;
922 		buf = m_new->m_ext.ext_arg;
923 	}
924 	m_new->m_data = m_new->m_ext.ext_buf;
925 	m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
926 
927 	paddr = buf->bge_paddr;
928 	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) {
929 		m_adj(m_new, ETHER_ALIGN);
930 		paddr += ETHER_ALIGN;
931 	}
932 
933 	/* Set up the descriptor. */
934 	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
935 
936 	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
937 	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(paddr);
938 	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(paddr);
939 	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
940 	r->bge_len = m_new->m_len;
941 	r->bge_idx = i;
942 
943 	return 0;
944 }
945 
946 /*
947  * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
948  * that's 1MB of memory, which is a lot. For now, we fill only the first
949  * 256 ring entries and hope that our CPU is fast enough to keep up with
950  * the NIC.
951  */
952 static int
953 bge_init_rx_ring_std(struct bge_softc *sc)
954 {
955 	int i;
956 
957 	for (i = 0; i < BGE_SSLOTS; i++) {
958 		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
959 			return(ENOBUFS);
960 	}
961 
962 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
963 			sc->bge_cdata.bge_rx_std_ring_map,
964 			BUS_DMASYNC_PREWRITE);
965 
966 	sc->bge_std = i - 1;
967 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
968 
969 	return(0);
970 }
971 
972 static void
973 bge_free_rx_ring_std(struct bge_softc *sc)
974 {
975 	int i;
976 
977 	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
978 		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
979 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
980 					  sc->bge_cdata.bge_rx_std_dmamap[i]);
981 			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
982 			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
983 		}
984 		bzero(&sc->bge_ldata.bge_rx_std_ring[i],
985 		    sizeof(struct bge_rx_bd));
986 	}
987 }
988 
989 static int
990 bge_init_rx_ring_jumbo(struct bge_softc *sc)
991 {
992 	int i;
993 	struct bge_rcb *rcb;
994 
995 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
996 		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
997 			return(ENOBUFS);
998 	}
999 
1000 	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1001 			sc->bge_cdata.bge_rx_jumbo_ring_map,
1002 			BUS_DMASYNC_PREWRITE);
1003 
1004 	sc->bge_jumbo = i - 1;
1005 
1006 	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1007 	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
1008 	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1009 
1010 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1011 
1012 	return(0);
1013 }
1014 
1015 static void
1016 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1017 {
1018 	int i;
1019 
1020 	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1021 		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1022 			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1023 			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1024 		}
1025 		bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i],
1026 		    sizeof(struct bge_rx_bd));
1027 	}
1028 }
1029 
1030 static void
1031 bge_free_tx_ring(struct bge_softc *sc)
1032 {
1033 	int i;
1034 
1035 	for (i = 0; i < BGE_TX_RING_CNT; i++) {
1036 		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1037 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
1038 					  sc->bge_cdata.bge_tx_dmamap[i]);
1039 			m_freem(sc->bge_cdata.bge_tx_chain[i]);
1040 			sc->bge_cdata.bge_tx_chain[i] = NULL;
1041 		}
1042 		bzero(&sc->bge_ldata.bge_tx_ring[i],
1043 		    sizeof(struct bge_tx_bd));
1044 	}
1045 }
1046 
1047 static int
1048 bge_init_tx_ring(struct bge_softc *sc)
1049 {
1050 	sc->bge_txcnt = 0;
1051 	sc->bge_tx_saved_considx = 0;
1052 	sc->bge_tx_prodidx = 0;
1053 
1054 	/* Initialize transmit producer index for host-memory send ring. */
1055 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1056 
1057 	/* 5700 b2 errata */
1058 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1059 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1060 
1061 	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1062 	/* 5700 b2 errata */
1063 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1064 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1065 
1066 	return(0);
1067 }
1068 
1069 static void
1070 bge_setmulti(struct bge_softc *sc)
1071 {
1072 	struct ifnet *ifp;
1073 	struct ifmultiaddr *ifma;
1074 	uint32_t hashes[4] = { 0, 0, 0, 0 };
1075 	int h, i;
1076 
1077 	ifp = &sc->arpcom.ac_if;
1078 
1079 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
1080 		for (i = 0; i < 4; i++)
1081 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1082 		return;
1083 	}
1084 
1085 	/* First, zot all the existing filters. */
1086 	for (i = 0; i < 4; i++)
1087 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1088 
1089 	/* Now program new ones. */
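	/*
	 * Each address is hashed down to 7 bits of its CRC32: the top
	 * two bits select one of the four 32-bit MAR registers and the
	 * low five bits select the bit within that register.
	 */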
1090 	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1091 		if (ifma->ifma_addr->sa_family != AF_LINK)
1092 			continue;
1093 		h = ether_crc32_le(
1094 		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1095 		    ETHER_ADDR_LEN) & 0x7f;
1096 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1097 	}
1098 
1099 	for (i = 0; i < 4; i++)
1100 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1101 }
1102 
1103 /*
1104  * Do endian, PCI and DMA initialization. Also check the on-board ROM
1105  * self-test results.
1106  */
1107 static int
1108 bge_chipinit(struct bge_softc *sc)
1109 {
1110 	int i;
1111 	uint32_t dma_rw_ctl;
1112 
1113 	/* Set endian type before we access any non-PCI registers. */
1114 	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);
1115 
1116 	/*
1117 	 * Check the 'ROM failed' bit on the RX CPU to see if
1118 	 * self-tests passed.
1119 	 */
1120 	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1121 		if_printf(&sc->arpcom.ac_if,
1122 			  "RX CPU self-diagnostics failed!\n");
1123 		return(ENODEV);
1124 	}
1125 
1126 	/* Clear the MAC control register */
1127 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1128 
1129 	/*
1130 	 * Clear the MAC statistics block in the NIC's
1131 	 * internal memory.
1132 	 */
1133 	for (i = BGE_STATS_BLOCK;
1134 	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1135 		BGE_MEMWIN_WRITE(sc, i, 0);
1136 
1137 	for (i = BGE_STATUS_BLOCK;
1138 	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1139 		BGE_MEMWIN_WRITE(sc, i, 0);
1140 
1141 	/* Set up the PCI DMA control register. */
1142 	if (sc->bge_flags & BGE_FLAG_PCIE) {
1143 		/* PCI Express */
1144 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1145 		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1146 		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1147 	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
1148 		/* PCI-X bus */
1149 		if (BGE_IS_5714_FAMILY(sc)) {
1150 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1151 			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1152 			/* XXX magic values, Broadcom-supplied Linux driver */
1153 			if (sc->bge_asicrev == BGE_ASICREV_BCM5780) {
1154 				dma_rw_ctl |= (1 << 20) | (1 << 18) |
1155 				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1156 			} else {
1157 				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);
1158 			}
1159 		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1160 			/*
1161 			 * The 5704 uses a different encoding of read/write
1162 			 * watermarks.
1163 			 */
1164 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1165 			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1166 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1167 		} else {
1168 			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1169 			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1170 			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1171 			    (0x0F);
1172 		}
1173 
1174 		/*
1175 		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1176 		 * for hardware bugs.
1177 		 */
1178 		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1179 		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1180 			uint32_t tmp;
1181 
1182 			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1183 			if (tmp == 0x6 || tmp == 0x7)
1184 				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1185 		}
1186 	} else {
1187 		/* Conventional PCI bus */
1188 		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1189 		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1190 		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1191 		    (0x0F);
1192 	}
1193 
1194 	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1195 	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1196 	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
1197 		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1198 	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1199 
1200 	/*
1201 	 * Set up general mode register.
1202 	 */
1203 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1204 	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1205 	    BGE_MODECTL_TX_NO_PHDR_CSUM);
1206 
1207 	/*
1208 	 * Disable memory write invalidate.  Apparently it is not supported
1209 	 * properly by these devices.
1210 	 */
1211 	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1212 
1213 	/* Set the timer prescaler (always 66MHz) */
1214 	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1215 
1216 	return(0);
1217 }
1218 
1219 static int
1220 bge_blockinit(struct bge_softc *sc)
1221 {
1222 	struct bge_rcb *rcb;
1223 	bus_size_t vrcb;
1224 	bge_hostaddr taddr;
1225 	uint32_t val;
1226 	int i;
1227 
1228 	/*
1229 	 * Initialize the memory window pointer register so that
1230 	 * we can access the first 32K of internal NIC RAM. This will
1231 	 * allow us to set up the TX send ring RCBs and the RX return
1232 	 * ring RCBs, plus other things which live in NIC memory.
1233 	 */
1234 	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1235 
1236 	/* Note: the BCM5704 has a smaller mbuf space than other chips. */
1237 
1238 	if (!BGE_IS_5705_PLUS(sc)) {
1239 		/* Configure mbuf memory pool */
1240 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1241 		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1242 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1243 		else
1244 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1245 
1246 		/* Configure DMA resource pool */
1247 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1248 		    BGE_DMA_DESCRIPTORS);
1249 		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1250 	}
1251 
1252 	/* Configure mbuf pool watermarks */
1253 	if (BGE_IS_5705_PLUS(sc)) {
1254 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1255 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1256 	} else {
1257 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1258 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1259 	}
1260 	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1261 
1262 	/* Configure DMA resource watermarks */
1263 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1264 	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1265 
1266 	/* Enable buffer manager */
1267 	if (!BGE_IS_5705_PLUS(sc)) {
1268 		CSR_WRITE_4(sc, BGE_BMAN_MODE,
1269 		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1270 
1271 		/* Poll for buffer manager start indication */
1272 		for (i = 0; i < BGE_TIMEOUT; i++) {
1273 			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1274 				break;
1275 			DELAY(10);
1276 		}
1277 
1278 		if (i == BGE_TIMEOUT) {
1279 			if_printf(&sc->arpcom.ac_if,
1280 				  "buffer manager failed to start\n");
1281 			return(ENXIO);
1282 		}
1283 	}
1284 
1285 	/* Enable flow-through queues */
1286 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1287 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1288 
1289 	/* Wait until queue initialization is complete */
1290 	for (i = 0; i < BGE_TIMEOUT; i++) {
1291 		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1292 			break;
1293 		DELAY(10);
1294 	}
1295 
1296 	if (i == BGE_TIMEOUT) {
1297 		if_printf(&sc->arpcom.ac_if,
1298 			  "flow-through queue init failed\n");
1299 		return(ENXIO);
1300 	}
1301 
1302 	/* Initialize the standard RX ring control block */
1303 	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1304 	rcb->bge_hostaddr.bge_addr_lo =
1305 	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1306 	rcb->bge_hostaddr.bge_addr_hi =
1307 	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1308 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1309 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
1310 	if (BGE_IS_5705_PLUS(sc))
1311 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1312 	else
1313 		rcb->bge_maxlen_flags =
1314 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1315 	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1316 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1317 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1318 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1319 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1320 
1321 	/*
1322 	 * Initialize the jumbo RX ring control block
1323 	 * We set the 'ring disabled' bit in the flags
1324 	 * field until we're actually ready to start
1325 	 * using this ring (i.e. once we set the MTU
1326 	 * high enough to require it).
1327 	 */
1328 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1329 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1330 
1331 		rcb->bge_hostaddr.bge_addr_lo =
1332 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1333 		rcb->bge_hostaddr.bge_addr_hi =
1334 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1335 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1336 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1337 		    BUS_DMASYNC_PREREAD);
1338 		rcb->bge_maxlen_flags =
1339 		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1340 		    BGE_RCB_FLAG_RING_DISABLED);
1341 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1342 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1343 		    rcb->bge_hostaddr.bge_addr_hi);
1344 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1345 		    rcb->bge_hostaddr.bge_addr_lo);
1346 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1347 		    rcb->bge_maxlen_flags);
1348 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1349 
1350 		/* Set up dummy disabled mini ring RCB */
1351 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
1352 		rcb->bge_maxlen_flags =
1353 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1354 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1355 		    rcb->bge_maxlen_flags);
1356 	}
1357 
1358 	/*
1359 	 * Set the BD ring replenish thresholds. The recommended
1360 	 * values are 1/8th the number of descriptors allocated to
1361 	 * each ring.
1362 	 */
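	/*
	 * With the 512-entry standard ring this works out to
	 * 512 / 8 = 64; the 5705 and later parts use a fixed
	 * threshold of 8 instead.
	 */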
1363 	if (BGE_IS_5705_PLUS(sc))
1364 		val = 8;
1365 	else
1366 		val = BGE_STD_RX_RING_CNT / 8;
1367 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
1368 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1369 
1370 	/*
1371 	 * Disable all unused send rings by setting the 'ring disabled'
1372 	 * bit in the flags field of all the TX send ring control blocks.
1373 	 * These are located in NIC memory.
1374 	 */
1375 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1376 	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1377 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1378 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1379 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1380 		vrcb += sizeof(struct bge_rcb);
1381 	}
1382 
1383 	/* Configure TX RCB 0 (we use only the first ring) */
1384 	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1385 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
1386 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1387 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1388 	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
1389 	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1390 	if (!BGE_IS_5705_PLUS(sc)) {
1391 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1392 		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1393 	}
1394 
1395 	/* Disable all unused RX return rings */
1396 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1397 	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1398 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
1399 		RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
1400 		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1401 		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1402 		    BGE_RCB_FLAG_RING_DISABLED));
1403 		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
1404 		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1405 		    (i * (sizeof(uint64_t))), 0);
1406 		vrcb += sizeof(struct bge_rcb);
1407 	}
1408 
1409 	/* Initialize RX ring indexes */
1410 	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1411 	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1412 	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1413 
1414 	/*
1415 	 * Set up RX return ring 0
1416 	 * Note that the NIC address for RX return rings is 0x00000000.
1417 	 * The return rings live entirely within the host, so the
1418 	 * nicaddr field in the RCB isn't used.
1419 	 */
1420 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1421 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1422 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1423 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1424 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1425 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1426 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1427 
1428 	/* Set random backoff seed for TX */
1429 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1430 	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1431 	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1432 	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1433 	    BGE_TX_BACKOFF_SEED_MASK);
1434 
1435 	/* Set inter-packet gap */
1436 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1437 
1438 	/*
1439 	 * Specify which ring to use for packets that don't match
1440 	 * any RX rules.
1441 	 */
1442 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1443 
1444 	/*
1445 	 * Configure number of RX lists. One interrupt distribution
1446 	 * list, sixteen active lists, one bad frames class.
1447 	 */
1448 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1449 
1450 	/* Initialize RX list placement stats mask. */
1451 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1452 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1453 
1454 	/* Disable host coalescing until we get it set up */
1455 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1456 
1457 	/* Poll to make sure it's shut down. */
1458 	for (i = 0; i < BGE_TIMEOUT; i++) {
1459 		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1460 			break;
1461 		DELAY(10);
1462 	}
1463 
1464 	if (i == BGE_TIMEOUT) {
1465 		if_printf(&sc->arpcom.ac_if,
1466 			  "host coalescing engine failed to idle\n");
1467 		return(ENXIO);
1468 	}
1469 
1470 	/* Set up host coalescing defaults */
1471 	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1472 	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1473 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1474 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1475 	if (!BGE_IS_5705_PLUS(sc)) {
1476 		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1477 		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1478 	}
1479 	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
1480 	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
1481 
1482 	/* Set up address of statistics block */
1483 	if (!BGE_IS_5705_PLUS(sc)) {
1484 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
1485 		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
1486 		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1487 		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
1488 
1489 		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1490 		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1491 		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1492 	}
1493 
1494 	/* Set up address of status block */
1495 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
1496 	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
1497 	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1498 	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
1499 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
1500 	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;
1501 
1502 	/* Turn on host coalescing state machine */
1503 	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1504 
1505 	/* Turn on RX BD completion state machine and enable attentions */
1506 	CSR_WRITE_4(sc, BGE_RBDC_MODE,
1507 	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1508 
1509 	/* Turn on RX list placement state machine */
1510 	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1511 
1512 	/* Turn on RX list selector state machine. */
1513 	if (!BGE_IS_5705_PLUS(sc))
1514 		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1515 
1516 	/* Turn on DMA, clear stats */
1517 	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1518 	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1519 	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1520 	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1521 	    ((sc->bge_flags & BGE_FLAG_TBI) ?
1522 	     BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1523 
1524 	/* Set misc. local control, enable interrupts on attentions */
1525 	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1526 
1527 #ifdef notdef
1528 	/* Assert GPIO pins for PHY reset */
1529 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1530 	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1531 	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1532 	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1533 #endif
1534 
1535 	/* Turn on DMA completion state machine */
1536 	if (!BGE_IS_5705_PLUS(sc))
1537 		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1538 
1539 	/* Turn on write DMA state machine */
1540 	val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1541 	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1542 	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
1543 		val |= (1 << 29);	/* Enable host coalescing bug fix. */
1544 	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1545 	DELAY(40);
1546 
1547 	/* Turn on read DMA state machine */
1548 	val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1549 	if (sc->bge_flags & BGE_FLAG_PCIE)
1550 		val |= BGE_RDMAMODE_FIFO_LONG_BURST;
1551 	CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
1552 	DELAY(40);
1553 
1554 	/* Turn on RX data completion state machine */
1555 	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1556 
1557 	/* Turn on RX BD initiator state machine */
1558 	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1559 
1560 	/* Turn on RX data and RX BD initiator state machine */
1561 	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1562 
1563 	/* Turn on Mbuf cluster free state machine */
1564 	if (!BGE_IS_5705_PLUS(sc))
1565 		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1566 
1567 	/* Turn on send BD completion state machine */
1568 	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1569 
1570 	/* Turn on send data completion state machine */
1571 	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1572 
1573 	/* Turn on send data initiator state machine */
1574 	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1575 
1576 	/* Turn on send BD initiator state machine */
1577 	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1578 
1579 	/* Turn on send BD selector state machine */
1580 	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1581 
1582 	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1583 	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1584 	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1585 
1586 	/* ack/clear link change events */
1587 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1588 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1589 	    BGE_MACSTAT_LINK_CHANGED);
1590 	CSR_WRITE_4(sc, BGE_MI_STS, 0);
1591 
1592 	/* Enable PHY auto polling (for MII/GMII only) */
1593 	if (sc->bge_flags & BGE_FLAG_TBI) {
1594 		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1595 	} else {
1596 		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
1597 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1598 		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1599 			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1600 			    BGE_EVTENB_MI_INTERRUPT);
1601 		}
1602 	}
1603 
1604 	/*
1605 	 * Clear any pending link state attention.
1606 	 * Otherwise some link state change events may be lost until attention
1607 	 * is cleared by the bge_intr() -> bge_softc.bge_link_upd() sequence.
1608 	 * It's not necessary on newer BCM chips - perhaps enabling link
1609 	 * state change attentions implies clearing pending attention.
1610 	 */
1611 	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1612 	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1613 	    BGE_MACSTAT_LINK_CHANGED);
1614 
1615 	/* Enable link state change attentions. */
1616 	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1617 
1618 	return(0);
1619 }
1620 
1621 /*
1622  * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1623  * against our list and return its name if we find a match. Note
1624  * that although the Broadcom controller contains VPD support, and
1625  * the device name string could thus be read from the controller
1626  * itself, this driver simply announces the name from its
1627  * compiled-in table.
1628  */
1629 static int
1630 bge_probe(device_t dev)
1631 {
1632 	const struct bge_type *t;
1633 	uint16_t product, vendor;
1634 
1635 	product = pci_get_device(dev);
1636 	vendor = pci_get_vendor(dev);
1637 
1638 	for (t = bge_devs; t->bge_name != NULL; t++) {
1639 		if (vendor == t->bge_vid && product == t->bge_did)
1640 			break;
1641 	}
1642 	if (t->bge_name == NULL)
1643 		return(ENXIO);
1644 
1645 	device_set_desc(dev, t->bge_name);
1646 	if (pci_get_subvendor(dev) == PCI_VENDOR_DELL) {
1647 		struct bge_softc *sc = device_get_softc(dev);
1648 		sc->bge_flags |= BGE_FLAG_NO_3LED;
1649 	}
1650 	return(0);
1651 }
1652 
1653 static int
1654 bge_attach(device_t dev)
1655 {
1656 	struct ifnet *ifp;
1657 	struct bge_softc *sc;
1658 	uint32_t hwcfg = 0;
1659 	uint32_t mac_addr = 0;
1660 	int error = 0, rid;
1661 	uint8_t ether_addr[ETHER_ADDR_LEN];
1662 
1663 	sc = device_get_softc(dev);
1664 	sc->bge_dev = dev;
1665 	callout_init(&sc->bge_stat_timer);
1666 	lwkt_serialize_init(&sc->bge_jslot_serializer);
1667 
1668 	/*
1669 	 * Map control/status registers.
1670 	 */
1671 	pci_enable_busmaster(dev);
1672 
1673 	rid = BGE_PCI_BAR0;
1674 	sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1675 	    RF_ACTIVE);
1676 
1677 	if (sc->bge_res == NULL) {
1678 		device_printf(dev, "couldn't map memory\n");
1679 		return ENXIO;
1680 	}
1681 
1682 	sc->bge_btag = rman_get_bustag(sc->bge_res);
1683 	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1684 
1685 	/* Save various chip information */
1686 	sc->bge_chipid =
1687 	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1688 	    BGE_PCIMISCCTL_ASICREV;
1689 	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1690 	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1691 
1692 	/* Save chipset family. */
1693 	switch (sc->bge_asicrev) {
1694 	case BGE_ASICREV_BCM5700:
1695 	case BGE_ASICREV_BCM5701:
1696 	case BGE_ASICREV_BCM5703:
1697 	case BGE_ASICREV_BCM5704:
1698 		sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
1699 		break;
1700 
1701 	case BGE_ASICREV_BCM5714_A0:
1702 	case BGE_ASICREV_BCM5780:
1703 	case BGE_ASICREV_BCM5714:
1704 		sc->bge_flags |= BGE_FLAG_5714_FAMILY;
1705 		/* Fall through */
1706 
1707 	case BGE_ASICREV_BCM5750:
1708 	case BGE_ASICREV_BCM5752:
1709 	case BGE_ASICREV_BCM5755:
1710 	case BGE_ASICREV_BCM5787:
1711 		sc->bge_flags |= BGE_FLAG_575X_PLUS;
1712 		/* Fall through */
1713 
1714 	case BGE_ASICREV_BCM5705:
1715 		sc->bge_flags |= BGE_FLAG_5705_PLUS;
1716 		break;
1717 	}
1718 
1719 	/*
1720 	 * Set various quirk flags.
1721 	 */
1722 
1723 	sc->bge_flags |= BGE_FLAG_ETH_WIRESPEED;
1724 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1725 	    (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
1726 	     (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1727 	      sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1728 	    sc->bge_asicrev == BGE_ASICREV_BCM5906)
1729 		sc->bge_flags &= ~BGE_FLAG_ETH_WIRESPEED;
1730 
1731 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
1732 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
1733 		sc->bge_flags |= BGE_FLAG_CRC_BUG;
1734 
1735 	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
1736 	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
1737 		sc->bge_flags |= BGE_FLAG_ADC_BUG;
1738 
1739 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1740 		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
1741 
1742 	if (BGE_IS_5705_PLUS(sc)) {
1743 		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
1744 		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
1745 			uint32_t product = pci_get_device(dev);
1746 
1747 			if (product != PCI_PRODUCT_BROADCOM_BCM5722 &&
1748 			    product != PCI_PRODUCT_BROADCOM_BCM5756)
1749 				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
1750 			if (product == PCI_PRODUCT_BROADCOM_BCM5755M)
1751 				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
1752 		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) {
1753 			sc->bge_flags |= BGE_FLAG_BER_BUG;
1754 		}
1755 	}
1756 
1757 	/* Allocate interrupt */
1758 	rid = 0;
1759 
1760 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1761 	    RF_SHAREABLE | RF_ACTIVE);
1762 
1763 	if (sc->bge_irq == NULL) {
1764 		device_printf(dev, "couldn't map interrupt\n");
1765 		error = ENXIO;
1766 		goto fail;
1767 	}
1768 
1769 	/*
1770 	 * Check if this is a PCI-X or PCI Express device.
1771 	 */
1772 	if (BGE_IS_5705_PLUS(sc)) {
1773 		sc->bge_expr_ptr = pci_get_pciecap_ptr(dev);
1774 
1775 		if (sc->bge_expr_ptr != 0) {
1776 			sc->bge_flags |= BGE_FLAG_PCIE;
1777 			bge_set_max_readrq(sc);
1778 		}
1779 	} else {
1780 		/*
1781 		 * Check if the device is in PCI-X Mode.
1782 		 * (This bit is not valid on PCI Express controllers.)
1783 		 */
1784 		if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1785 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
1786 			sc->bge_flags |= BGE_FLAG_PCIX;
1787 	}
1788 
1789 	device_printf(dev, "CHIP ID 0x%08x; "
1790 		      "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
1791 		      sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
1792 		      (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X"
1793 		      : ((sc->bge_flags & BGE_FLAG_PCIE) ?
1794 			"PCI-E" : "PCI"));
1795 
1796 	ifp = &sc->arpcom.ac_if;
1797 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1798 
1799 	/* Try to reset the chip. */
1800 	bge_reset(sc);
1801 
1802 	if (bge_chipinit(sc)) {
1803 		device_printf(dev, "chip initialization failed\n");
1804 		error = ENXIO;
1805 		goto fail;
1806 	}
1807 
1808 	/*
1809 	 * Get station address from the EEPROM.
1810 	 */
1811 	mac_addr = bge_readmem_ind(sc, 0x0c14);
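	/*
	 * 0x484b is ASCII "HK", apparently a signature the bootcode
	 * leaves in NIC memory when a valid station address has been
	 * stored there; otherwise fall back to the EEPROM.
	 */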
1812 	if ((mac_addr >> 16) == 0x484b) {
1813 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
1814 		ether_addr[1] = (uint8_t)mac_addr;
1815 		mac_addr = bge_readmem_ind(sc, 0x0c18);
1816 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
1817 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
1818 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
1819 		ether_addr[5] = (uint8_t)mac_addr;
1820 	} else if (bge_read_eeprom(sc, ether_addr,
1821 	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1822 		device_printf(dev, "failed to read station address\n");
1823 		error = ENXIO;
1824 		goto fail;
1825 	}
1826 
1827 	/* 5705/5750 limits RX return ring to 512 entries. */
1828 	if (BGE_IS_5705_PLUS(sc))
1829 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1830 	else
1831 		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1832 
1833 	error = bge_dma_alloc(sc);
1834 	if (error)
1835 		goto fail;
1836 
1837 	/* Set default tuneable values. */
1838 	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1839 	sc->bge_rx_coal_ticks = bge_rx_coal_ticks;
1840 	sc->bge_tx_coal_ticks = bge_tx_coal_ticks;
1841 	sc->bge_rx_max_coal_bds = bge_rx_max_coal_bds;
1842 	sc->bge_tx_max_coal_bds = bge_tx_max_coal_bds;
1843 
1844 	/* Set up ifnet structure */
1845 	ifp->if_softc = sc;
1846 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1847 	ifp->if_ioctl = bge_ioctl;
1848 	ifp->if_start = bge_start;
1849 #ifdef DEVICE_POLLING
1850 	ifp->if_poll = bge_poll;
1851 #endif
1852 	ifp->if_watchdog = bge_watchdog;
1853 	ifp->if_init = bge_init;
1854 	ifp->if_mtu = ETHERMTU;
1855 	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1856 	ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1857 	ifq_set_ready(&ifp->if_snd);
1858 
1859 	/*
1860 	 * 5700 B0 chips do not support checksumming correctly due
1861 	 * to hardware bugs.
1862 	 */
1863 	if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) {
1864 		ifp->if_capabilities |= IFCAP_HWCSUM;
1865 		ifp->if_hwassist = BGE_CSUM_FEATURES;
1866 	}
1867 	ifp->if_capenable = ifp->if_capabilities;
1868 
1869 	/*
1870 	 * Figure out what sort of media we have by checking the
1871 	 * hardware config word in the first 32k of NIC internal memory,
1872 	 * or fall back to examining the EEPROM if necessary.
1873 	 * Note: on some BCM5700 cards, this value appears to be unset.
1874 	 * If that's the case, we have to rely on identifying the NIC
1875 	 * by its PCI subsystem ID, as we do below for the SysKonnect
1876 	 * SK-9D41.
1877 	 */
1878 	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1879 		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1880 	else {
1881 		if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1882 				    sizeof(hwcfg))) {
1883 			device_printf(dev, "failed to read EEPROM\n");
1884 			error = ENXIO;
1885 			goto fail;
1886 		}
1887 		hwcfg = ntohl(hwcfg);
1888 	}
1889 
1890 	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1891 		sc->bge_flags |= BGE_FLAG_TBI;
1892 
1893 	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
1894 	if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1895 		sc->bge_flags |= BGE_FLAG_TBI;
1896 
1897 	if (sc->bge_flags & BGE_FLAG_TBI) {
1898 		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1899 		    bge_ifmedia_upd, bge_ifmedia_sts);
1900 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1901 		ifmedia_add(&sc->bge_ifmedia,
1902 		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1903 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1904 		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1905 		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
1906 	} else {
1907 		/*
1908 		 * Do transceiver setup.
1909 		 */
1910 		if (mii_phy_probe(dev, &sc->bge_miibus,
1911 		    bge_ifmedia_upd, bge_ifmedia_sts)) {
1912 			device_printf(dev, "MII without any PHY!\n");
1913 			error = ENXIO;
1914 			goto fail;
1915 		}
1916 	}
1917 
1918 	/*
1919 	 * When using the BCM5701 in PCI-X mode, data corruption has
1920 	 * been observed in the first few bytes of some received packets.
1921 	 * Aligning the packet buffer in memory eliminates the corruption.
1922 	 * Unfortunately, this misaligns the packet payloads.  On platforms
1923 	 * which do not support unaligned accesses, we will realign the
1924 	 * payloads by copying the received packets.
1925 	 */
1926 	if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1927 	    (sc->bge_flags & BGE_FLAG_PCIX))
1928 		sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
1929 
1930 	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
1931 	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
1932 		sc->bge_link_upd = bge_bcm5700_link_upd;
1933 		sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT;
1934 	} else if (sc->bge_flags & BGE_FLAG_TBI) {
1935 		sc->bge_link_upd = bge_tbi_link_upd;
1936 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
1937 	} else {
1938 		sc->bge_link_upd = bge_copper_link_upd;
1939 		sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED;
1940 	}
1941 
1942 	/*
1943 	 * Create sysctl nodes.
1944 	 */
1945 	sysctl_ctx_init(&sc->bge_sysctl_ctx);
1946 	sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx,
1947 					      SYSCTL_STATIC_CHILDREN(_hw),
1948 					      OID_AUTO,
1949 					      device_get_nameunit(dev),
1950 					      CTLFLAG_RD, 0, "");
1951 	if (sc->bge_sysctl_tree == NULL) {
1952 		device_printf(dev, "can't add sysctl node\n");
1953 		error = ENXIO;
1954 		goto fail;
1955 	}
1956 
1957 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
1958 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
1959 			OID_AUTO, "rx_coal_ticks",
1960 			CTLTYPE_INT | CTLFLAG_RW,
1961 			sc, 0, bge_sysctl_rx_coal_ticks, "I",
1962 			"Receive coalescing ticks (usec).");
1963 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
1964 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
1965 			OID_AUTO, "tx_coal_ticks",
1966 			CTLTYPE_INT | CTLFLAG_RW,
1967 			sc, 0, bge_sysctl_tx_coal_ticks, "I",
1968 			"Transmit coalescing ticks (usec).");
1969 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
1970 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
1971 			OID_AUTO, "rx_max_coal_bds",
1972 			CTLTYPE_INT | CTLFLAG_RW,
1973 			sc, 0, bge_sysctl_rx_max_coal_bds, "I",
1974 			"Receive max coalesced BD count.");
1975 	SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx,
1976 			SYSCTL_CHILDREN(sc->bge_sysctl_tree),
1977 			OID_AUTO, "tx_max_coal_bds",
1978 			CTLTYPE_INT | CTLFLAG_RW,
1979 			sc, 0, bge_sysctl_tx_max_coal_bds, "I",
1980 			"Transmit max coalesced BD count.");
1981 
1982 	/*
1983 	 * Call MI attach routine.
1984 	 */
1985 	ether_ifattach(ifp, ether_addr, NULL);
1986 
1987 	error = bus_setup_intr(dev, sc->bge_irq, INTR_NETSAFE,
1988 			       bge_intr, sc, &sc->bge_intrhand,
1989 			       ifp->if_serializer);
1990 	if (error) {
1991 		ether_ifdetach(ifp);
1992 		device_printf(dev, "couldn't set up irq\n");
1993 		goto fail;
1994 	}
1995 
1996 	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bge_irq));
1997 	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
1998 
1999 	return(0);
2000 fail:
2001 	bge_detach(dev);
2002 	return(error);
2003 }
2004 
2005 static int
2006 bge_detach(device_t dev)
2007 {
2008 	struct bge_softc *sc = device_get_softc(dev);
2009 
2010 	if (device_is_attached(dev)) {
2011 		struct ifnet *ifp = &sc->arpcom.ac_if;
2012 
2013 		lwkt_serialize_enter(ifp->if_serializer);
2014 		bge_stop(sc);
2015 		bge_reset(sc);
2016 		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2017 		lwkt_serialize_exit(ifp->if_serializer);
2018 
2019 		ether_ifdetach(ifp);
2020 	}
2021 
2022 	if (sc->bge_flags & BGE_FLAG_TBI)
2023 		ifmedia_removeall(&sc->bge_ifmedia);
2024 	if (sc->bge_miibus)
2025 		device_delete_child(dev, sc->bge_miibus);
2026 	bus_generic_detach(dev);
2027 
2028 	if (sc->bge_irq != NULL)
2029 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
2030 
2031 	if (sc->bge_res != NULL)
2032 		bus_release_resource(dev, SYS_RES_MEMORY,
2033 		    BGE_PCI_BAR0, sc->bge_res);
2034 
2035 	if (sc->bge_sysctl_tree != NULL)
2036 		sysctl_ctx_free(&sc->bge_sysctl_ctx);
2037 
2038 	bge_dma_free(sc);
2039 
2040 	return 0;
2041 }
2042 
2043 static void
2044 bge_reset(struct bge_softc *sc)
2045 {
2046 	device_t dev;
2047 	uint32_t cachesize, command, pcistate, reset;
2048 	void (*write_op)(struct bge_softc *, uint32_t, uint32_t);
2049 	int i, val = 0;
2050 
2051 	dev = sc->bge_dev;
2052 
2053 	if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
2054 		if (sc->bge_flags & BGE_FLAG_PCIE)
2055 			write_op = bge_writemem_direct;
2056 		else
2057 			write_op = bge_writemem_ind;
2058 	} else {
2059 		write_op = bge_writereg_ind;
2060 	}
2061 
2062 	/* Save some important PCI state. */
2063 	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2064 	command = pci_read_config(dev, BGE_PCI_CMD, 4);
2065 	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2066 
2067 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2068 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2069 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2070 
2071 	/* Disable fastboot on controllers that support it. */
2072 	if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2073 	    sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2074 	    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2075 		if (bootverbose)
2076 			if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n");
2077 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2078 	}
2079 
2080 	/*
2081 	 * Write the magic number to SRAM at offset 0xB50.
2082 	 * When firmware finishes its initialization it will
2083 	 * write ~BGE_MAGIC_NUMBER to the same location.
2084 	 */
2085 	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2086 
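	/*
	 * (65 << 1) programs the 32-bit timer prescaler field for a
	 * 66MHz core clock (later drivers name this value
	 * BGE_32BITTIME_66MHZ).
	 */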
2087 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2088 
2089 	/* XXX: Broadcom Linux driver. */
2090 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2091 		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
2092 			CSR_WRITE_4(sc, 0x7e2c, 0x20);
2093 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2094 			/* Prevent PCIE link training during global reset */
2095 			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2096 			reset |= (1<<29);
2097 		}
2098 	}
2099 
2100 	/*
2101 	 * Set GPHY Power Down Override to leave GPHY
2102 	 * powered up in D0 uninitialized.
2103 	 */
2104 	if (BGE_IS_5705_PLUS(sc))
2105 		reset |= 0x04000000;
2106 
2107 	/* Issue global reset */
2108 	write_op(sc, BGE_MISC_CFG, reset);
2109 
2110 	DELAY(1000);
2111 
2112 	/* XXX: Broadcom Linux driver. */
2113 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2114 		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2115 			uint32_t v;
2116 
2117 			DELAY(500000); /* wait for link training to complete */
2118 			v = pci_read_config(dev, 0xc4, 4);
2119 			pci_write_config(dev, 0xc4, v | (1<<15), 4);
2120 		}
2121 		/*
2122 		 * Set PCIE max payload size to 128 bytes and
2123 		 * clear error status.
2124 		 */
2125 		pci_write_config(dev, 0xd8, 0xf5000, 4);
2126 	}
2127 
2128 	/* Reset some of the PCI state that got zapped by reset */
2129 	pci_write_config(dev, BGE_PCI_MISC_CTL,
2130 	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2131 	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4);
2132 	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
2133 	pci_write_config(dev, BGE_PCI_CMD, command, 4);
2134 	write_op(sc, BGE_MISC_CFG, (65 << 1));
2135 
2136 	/* Enable memory arbiter. */
2137 	if (BGE_IS_5714_FAMILY(sc)) {
2138 		uint32_t val;
2139 
2140 		val = CSR_READ_4(sc, BGE_MARB_MODE);
2141 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2142 	} else {
2143 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2144 	}
2145 
2146 	/*
2147 	 * Poll until we see the 1's complement of the magic number.
2148 	 * This indicates that the firmware initialization
2149 	 * is complete.
2150 	 */
2151 	for (i = 0; i < BGE_TIMEOUT; i++) {
2152 		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2153 		if (val == ~BGE_MAGIC_NUMBER)
2154 			break;
2155 		DELAY(10);
2156 	}
2157 
2158 	if (i == BGE_TIMEOUT) {
2159 		if_printf(&sc->arpcom.ac_if, "firmware handshake timed out, "
2160 			  "found 0x%08x\n", val);
2161 		return;
2162 	}
2163 
2164 	/*
2165 	 * XXX Wait for the value of the PCISTATE register to
2166 	 * return to its original pre-reset state. This is a
2167 	 * fairly good indicator of reset completion. If we don't
2168 	 * wait for the reset to fully complete, trying to read
2169 	 * from the device's non-PCI registers may yield garbage
2170 	 * results.
2171 	 */
2172 	for (i = 0; i < BGE_TIMEOUT; i++) {
2173 		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
2174 			break;
2175 		DELAY(10);
2176 	}
2177 
2178 	if (sc->bge_flags & BGE_FLAG_PCIE) {
2179 		reset = bge_readmem_ind(sc, 0x7c00);
2180 		bge_writemem_ind(sc, 0x7c00, reset | (1 << 25));
2181 	}
2182 
2183 	/* Fix up byte swapping */
2184 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
2185 	    BGE_MODECTL_BYTESWAP_DATA);
2186 
2187 	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2188 
2189 	/*
2190 	 * The 5704 in TBI mode apparently needs some special
2191 	 * adjustment to ensure the SERDES drive level is set
2192 	 * to 1.2V.
2193 	 */
2194 	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
2195 	    (sc->bge_flags & BGE_FLAG_TBI)) {
2196 		uint32_t serdescfg;
2197 
2198 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2199 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
2200 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2201 	}
2202 
2203 	/* XXX: Broadcom Linux driver. */
2204 	if ((sc->bge_flags & BGE_FLAG_PCIE) &&
2205 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2206 		uint32_t v;
2207 
2208 		v = CSR_READ_4(sc, 0x7c00);
2209 		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2210 	}
2211 
2212 	DELAY(10000);
2213 }
2214 
2215 /*
2216  * Frame reception handling. This is called if there's a frame
2217  * on the receive return list.
2218  *
2219  * Note: we have to be able to handle two possibilities here:
2220  * 1) the frame is from the jumbo receive ring
2221  * 2) the frame is from the standard receive ring
2222  */
2223 
2224 static void
2225 bge_rxeof(struct bge_softc *sc)
2226 {
2227 	struct ifnet *ifp;
2228 	int stdcnt = 0, jumbocnt = 0;
2229 #ifdef ETHER_INPUT_CHAIN
2230 	struct mbuf_chain chain[MAXCPU];
2231 #endif
2232 
2233 	if (sc->bge_rx_saved_considx ==
2234 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx)
2235 		return;
2236 
2237 #ifdef ETHER_INPUT_CHAIN
2238 	ether_input_chain_init(chain);
2239 #endif
2240 
2241 	ifp = &sc->arpcom.ac_if;
2242 
2243 	bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
2244 			sc->bge_cdata.bge_rx_return_ring_map,
2245 			BUS_DMASYNC_POSTREAD);
2246 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2247 			sc->bge_cdata.bge_rx_std_ring_map,
2248 			BUS_DMASYNC_POSTREAD);
2249 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
2250 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2251 				sc->bge_cdata.bge_rx_jumbo_ring_map,
2252 				BUS_DMASYNC_POSTREAD);
2253 	}
2254 
2255 	while (sc->bge_rx_saved_considx !=
2256 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) {
2257 		struct bge_rx_bd	*cur_rx;
2258 		uint32_t		rxidx;
2259 		struct mbuf		*m = NULL;
2260 		uint16_t		vlan_tag = 0;
2261 		int			have_tag = 0;
2262 
2263 		cur_rx =
2264 		    &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx];
2265 
2266 		rxidx = cur_rx->bge_idx;
2267 		BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2268 		logif(rx_pkt);
2269 
2270 		if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2271 			have_tag = 1;
2272 			vlan_tag = cur_rx->bge_vlan_tag;
2273 		}
2274 
2275 		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2276 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2277 			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2278 			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2279 			jumbocnt++;
2280 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2281 				ifp->if_ierrors++;
2282 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2283 				continue;
2284 			}
2285 			if (bge_newbuf_jumbo(sc,
2286 			    sc->bge_jumbo, NULL) == ENOBUFS) {
2287 				ifp->if_ierrors++;
2288 				bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2289 				continue;
2290 			}
2291 		} else {
2292 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2293 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2294 					sc->bge_cdata.bge_rx_std_dmamap[rxidx],
2295 					BUS_DMASYNC_POSTREAD);
2296 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2297 				sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
2298 			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2299 			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2300 			stdcnt++;
2301 			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2302 				ifp->if_ierrors++;
2303 				bge_newbuf_std(sc, sc->bge_std, m);
2304 				continue;
2305 			}
2306 			if (bge_newbuf_std(sc, sc->bge_std,
2307 			    NULL) == ENOBUFS) {
2308 				ifp->if_ierrors++;
2309 				bge_newbuf_std(sc, sc->bge_std, m);
2310 				continue;
2311 			}
2312 		}
2313 
2314 		ifp->if_ipackets++;
2315 #ifndef __i386__
2316 		/*
2317 		 * The i386 allows unaligned accesses, but for other
2318 		 * platforms we must make sure the payload is aligned.
2319 		 */
2320 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
2321 			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2322 			    cur_rx->bge_len);
2323 			m->m_data += ETHER_ALIGN;
2324 		}
2325 #endif
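		/* The hardware length includes the FCS; strip it. */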
2326 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2327 		m->m_pkthdr.rcvif = ifp;
2328 
2329 		if (ifp->if_capenable & IFCAP_RXCSUM) {
2330 			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
2331 				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2332 				if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2333 					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2334 			}
2335 			if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) &&
2336 			    m->m_pkthdr.len >= BGE_MIN_FRAME) {
2337 				m->m_pkthdr.csum_data =
2338 					cur_rx->bge_tcp_udp_csum;
2339 				m->m_pkthdr.csum_flags |=
2340 					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2341 			}
2342 		}
2343 
2344 		/*
2345 		 * If we received a packet with a vlan tag, pass it
2346 		 * to vlan_input() instead of ether_input().
2347 		 */
2348 		if (have_tag) {
2349 			m->m_flags |= M_VLANTAG;
2350 			m->m_pkthdr.ether_vlantag = vlan_tag;
2351 			have_tag = vlan_tag = 0;
2352 		}
2353 #ifdef ETHER_INPUT_CHAIN
2354 #ifdef ETHER_INPUT2
2355 		ether_input_chain2(ifp, m, chain);
2356 #else
2357 		ether_input_chain(ifp, m, chain);
2358 #endif
2359 #else
2360 		ifp->if_input(ifp, m);
2361 #endif
2362 	}
2363 
2364 #ifdef ETHER_INPUT_CHAIN
2365 	ether_input_dispatch(chain);
2366 #endif
2367 
2368 	if (stdcnt > 0) {
2369 		bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2370 				sc->bge_cdata.bge_rx_std_ring_map,
2371 				BUS_DMASYNC_PREWRITE);
2372 	}
2373 
2374 	if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) {
2375 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2376 				sc->bge_cdata.bge_rx_jumbo_ring_map,
2377 				BUS_DMASYNC_PREWRITE);
2378 	}
2379 
2380 	CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2381 	if (stdcnt)
2382 		CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2383 	if (jumbocnt)
2384 		CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2385 }
2386 
2387 static void
2388 bge_txeof(struct bge_softc *sc)
2389 {
2390 	struct bge_tx_bd *cur_tx = NULL;
2391 	struct ifnet *ifp;
2392 
2393 	if (sc->bge_tx_saved_considx ==
2394 	    sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx)
2395 		return;
2396 
2397 	ifp = &sc->arpcom.ac_if;
2398 
2399 	bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
2400 			sc->bge_cdata.bge_tx_ring_map,
2401 			BUS_DMASYNC_POSTREAD);
2402 
2403 	/*
2404 	 * Go through our tx ring and free mbufs for those
2405 	 * frames that have been sent.
2406 	 */
2407 	while (sc->bge_tx_saved_considx !=
2408 	       sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
2409 		uint32_t idx = 0;
2410 
2411 		idx = sc->bge_tx_saved_considx;
2412 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
2413 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2414 			ifp->if_opackets++;
2415 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2416 			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
2417 					sc->bge_cdata.bge_tx_dmamap[idx],
2418 					BUS_DMASYNC_POSTWRITE);
2419 			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
2420 			    sc->bge_cdata.bge_tx_dmamap[idx]);
2421 			m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2422 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
2423 		}
2424 		sc->bge_txcnt--;
2425 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2426 		logif(tx_pkt);
2427 	}
2428 
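	/*
	 * Resume transmission once enough descriptors are free to
	 * hold a worst-case packet again.
	 */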
2429 	if (cur_tx != NULL &&
2430 	    (BGE_TX_RING_CNT - sc->bge_txcnt) >=
2431 	    (BGE_NSEG_RSVD + BGE_NSEG_SPARE))
2432 		ifp->if_flags &= ~IFF_OACTIVE;
2433 
2434 	if (sc->bge_txcnt == 0)
2435 		ifp->if_timer = 0;
2436 
2437 	if (!ifq_is_empty(&ifp->if_snd))
2438 		if_devstart(ifp);
2439 }
2440 
2441 #ifdef DEVICE_POLLING
2442 
2443 static void
2444 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2445 {
2446 	struct bge_softc *sc = ifp->if_softc;
2447 	uint32_t status;
2448 
2449 	switch(cmd) {
2450 	case POLL_REGISTER:
2451 		bge_disable_intr(sc);
2452 		break;
2453 	case POLL_DEREGISTER:
2454 		bge_enable_intr(sc);
2455 		break;
2456 	case POLL_AND_CHECK_STATUS:
2457 		bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2458 				sc->bge_cdata.bge_status_map,
2459 				BUS_DMASYNC_POSTREAD);
2460 
2461 		/*
2462 		 * Process link state changes.
2463 		 */
2464 		status = CSR_READ_4(sc, BGE_MAC_STS);
2465 		if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2466 			sc->bge_link_evt = 0;
2467 			sc->bge_link_upd(sc, status);
2468 		}
2469 		/* fall through */
2470 	case POLL_ONLY:
2471 		if (ifp->if_flags & IFF_RUNNING) {
2472 			bge_rxeof(sc);
2473 			bge_txeof(sc);
2474 		}
2475 		break;
2476 	}
2477 }
2478 
2479 #endif
2480 
2481 static void
2482 bge_intr(void *xsc)
2483 {
2484 	struct bge_softc *sc = xsc;
2485 	struct ifnet *ifp = &sc->arpcom.ac_if;
2486 	uint32_t status;
2487 
2488 	logif(intr);
2489 
2490 	/*
2491 	 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO.  Don't
2492 	 * disable interrupts by writing nonzero like we used to, since with
2493 	 * our current organization this just gives complications and
2494 	 * pessimizations for re-enabling interrupts.  We used to have races
2495 	 * instead of the necessary complications.  Disabling interrupts
2496 	 * would just reduce the chance of a status update while we are
2497 	 * running (by switching to the interrupt-mode coalescence
2498 	 * parameters), but this chance is already very low so it is more
2499 	 * efficient to get another interrupt than prevent it.
2500 	 *
2501 	 * We do the ack first to ensure another interrupt if there is a
2502 	 * status update after the ack.  We don't check for the status
2503 	 * changing later because it is more efficient to get another
2504 	 * interrupt than prevent it, not quite as above (not checking is
2505 	 * a smaller optimization than not toggling the interrupt enable,
2506 	 * since checking doesn't involve PCI accesses and toggling requires
2507 	 * the status check).  So toggling would probably be a pessimization
2508 	 * even with MSI.  It would only be needed for using a task queue.
2509 	 */
2510 	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2511 
2512 	bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2513 			sc->bge_cdata.bge_status_map,
2514 			BUS_DMASYNC_POSTREAD);
2515 
2516 	/*
2517 	 * Process link state changes.
2518 	 */
2519 	status = CSR_READ_4(sc, BGE_MAC_STS);
2520 	if ((status & sc->bge_link_chg) || sc->bge_link_evt) {
2521 		sc->bge_link_evt = 0;
2522 		sc->bge_link_upd(sc, status);
2523 	}
2524 
2525 	if (ifp->if_flags & IFF_RUNNING) {
2526 		/* Check RX return ring producer/consumer */
2527 		bge_rxeof(sc);
2528 
2529 		/* Check TX ring producer/consumer */
2530 		bge_txeof(sc);
2531 	}
2532 
2533 	if (sc->bge_coal_chg)
2534 		bge_coal_change(sc);
2535 }
2536 
2537 static void
2538 bge_tick(void *xsc)
2539 {
2540 	struct bge_softc *sc = xsc;
2541 	struct ifnet *ifp = &sc->arpcom.ac_if;
2542 
2543 	lwkt_serialize_enter(ifp->if_serializer);
2544 
2545 	if (BGE_IS_5705_PLUS(sc))
2546 		bge_stats_update_regs(sc);
2547 	else
2548 		bge_stats_update(sc);
2549 
2550 	if (sc->bge_flags & BGE_FLAG_TBI) {
2551 		/*
2552 		 * Since auto-polling can't be used in TBI mode, poll the
2553 		 * link status manually: register a pending link event and
2554 		 * trigger an interrupt.
2555 		 */
2556 		sc->bge_link_evt++;
2557 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2558 	} else if (!sc->bge_link) {
2559 		mii_tick(device_get_softc(sc->bge_miibus));
2560 	}
2561 
2562 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2563 
2564 	lwkt_serialize_exit(ifp->if_serializer);
2565 }
2566 
2567 static void
2568 bge_stats_update_regs(struct bge_softc *sc)
2569 {
2570 	struct ifnet *ifp = &sc->arpcom.ac_if;
2571 	struct bge_mac_stats_regs stats;
2572 	uint32_t *s;
2573 	int i;
2574 
2575 	s = (uint32_t *)&stats;
2576 	for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2577 		*s = CSR_READ_4(sc, BGE_RX_STATS + i);
2578 		s++;
2579 	}
2580 
2581 	ifp->if_collisions +=
2582 	   (stats.dot3StatsSingleCollisionFrames +
2583 	   stats.dot3StatsMultipleCollisionFrames +
2584 	   stats.dot3StatsExcessiveCollisions +
2585 	   stats.dot3StatsLateCollisions) -
2586 	   ifp->if_collisions;
2587 }
2588 
2589 static void
2590 bge_stats_update(struct bge_softc *sc)
2591 {
2592 	struct ifnet *ifp = &sc->arpcom.ac_if;
2593 	bus_size_t stats;
2594 
2595 	stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2596 
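	/*
	 * The statistics block lives in NIC memory; read the low
	 * word of each 64-bit counter through the memory window.
	 */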
2597 #define READ_STAT(sc, stats, stat)	\
2598 	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2599 
2600 	ifp->if_collisions +=
2601 	   (READ_STAT(sc, stats,
2602 		txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) +
2603 	    READ_STAT(sc, stats,
2604 		txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2605 	    READ_STAT(sc, stats,
2606 		txstats.dot3StatsExcessiveCollisions.bge_addr_lo) +
2607 	    READ_STAT(sc, stats,
2608 		txstats.dot3StatsLateCollisions.bge_addr_lo)) -
2609 	   ifp->if_collisions;
2610 
2611 #undef READ_STAT
2612 
2613 #ifdef notdef
2614 	ifp->if_collisions +=
2615 	   (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2616 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2617 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2618 	   sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2619 	   ifp->if_collisions;
2620 #endif
2621 }
2622 
2623 /*
2624  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2625  * pointers to descriptors.
2626  */
2627 static int
2628 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx)
2629 {
2630 	struct bge_tx_bd *d = NULL;
2631 	uint16_t csum_flags = 0;
2632 	struct bge_dmamap_arg ctx;
2633 	bus_dma_segment_t segs[BGE_NSEG_NEW];
2634 	bus_dmamap_t map;
2635 	int error, maxsegs, idx, i;
2636 	struct mbuf *m_head = *m_head0;
2637 
2638 	if (m_head->m_pkthdr.csum_flags) {
2639 		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2640 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2641 		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2642 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2643 		if (m_head->m_flags & M_LASTFRAG)
2644 			csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2645 		else if (m_head->m_flags & M_FRAG)
2646 			csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2647 	}
2648 
2649 	idx = *txidx;
2650 	map = sc->bge_cdata.bge_tx_dmamap[idx];
2651 
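	/*
	 * Leave BGE_NSEG_RSVD descriptors as headroom so the TX ring
	 * is never driven completely full.
	 */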
2652 	maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD;
2653 	KASSERT(maxsegs >= BGE_NSEG_SPARE,
2654 		("not enough segments %d\n", maxsegs));
2655 
2656 	if (maxsegs > BGE_NSEG_NEW)
2657 		maxsegs = BGE_NSEG_NEW;
2658 
2659 	/*
2660 	 * Pad outbound frame to BGE_MIN_FRAME for an unusual reason.
2661 	 * The bge hardware will pad out Tx runts to BGE_MIN_FRAME,
2662 	 * but when such padded frames employ the bge IP/TCP checksum
2663 	 * offload, the hardware checksum assist gives incorrect results
2664 	 * (possibly from incorporating its own padding into the UDP/TCP
2665 	 * checksum; who knows).  If we pad such runts with zeros, the
2666 	 * onboard checksum comes out correct.  We do this by pretending
2667 	 * the mbuf chain has too many fragments so the coalescing code
2668 	 * below can assemble the packet into a single buffer that's
2669 	 * padded out to the minimum frame size.
2670 	 */
2671 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2672 	    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2673 		error = EFBIG;
2674 	} else {
2675 		ctx.bge_segs = segs;
2676 		ctx.bge_maxsegs = maxsegs;
2677 		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2678 					     m_head, bge_dma_map_mbuf, &ctx,
2679 					     BUS_DMA_NOWAIT);
2680 	}
2681 	if (error == EFBIG || ctx.bge_maxsegs == 0) {
2682 		struct mbuf *m_new;
2683 
2684 		m_new = m_defrag(m_head, MB_DONTWAIT);
2685 		if (m_new == NULL) {
2686 			if_printf(&sc->arpcom.ac_if,
2687 				  "could not defrag TX mbuf\n");
2688 			error = ENOBUFS;
2689 			goto back;
2690 		} else {
2691 			m_head = m_new;
2692 			*m_head0 = m_head;
2693 		}
2694 
2695 		/*
2696 		 * Manually pad short frames, and zero the pad space
2697 		 * to avoid leaking data.
2698 		 */
2699 		if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) &&
2700 		    m_head->m_pkthdr.len < BGE_MIN_FRAME) {
2701 			int pad_len = BGE_MIN_FRAME - m_head->m_pkthdr.len;
2702 
2703 			bzero(mtod(m_head, char *) + m_head->m_pkthdr.len,
2704 			      pad_len);
2705 			m_head->m_pkthdr.len += pad_len;
2706 			m_head->m_len = m_head->m_pkthdr.len;
2707 		}
2708 
2709 		ctx.bge_segs = segs;
2710 		ctx.bge_maxsegs = maxsegs;
2711 		error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map,
2712 					     m_head, bge_dma_map_mbuf, &ctx,
2713 					     BUS_DMA_NOWAIT);
2714 		if (error || ctx.bge_maxsegs == 0) {
2715 			if_printf(&sc->arpcom.ac_if,
2716 				  "could not map defragged TX mbuf\n");
2717 			if (error == 0)
2718 				error = EFBIG;
2719 			goto back;
2720 		}
2721 	} else if (error) {
2722 		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
2723 		goto back;
2724 	}
2725 
2726 	bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
2727 
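	/*
	 * Walk the DMA segment list, filling in one descriptor per
	 * segment; only the final descriptor gets the END flag.
	 */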
2728 	for (i = 0; ; i++) {
2729 		d = &sc->bge_ldata.bge_tx_ring[idx];
2730 
2731 		d->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_segs[i].ds_addr);
2732 		d->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_segs[i].ds_addr);
2733 		d->bge_len = segs[i].ds_len;
2734 		d->bge_flags = csum_flags;
2735 
2736 		if (i == ctx.bge_maxsegs - 1)
2737 			break;
2738 		BGE_INC(idx, BGE_TX_RING_CNT);
2739 	}
2740 	/* Mark the last segment as end of packet... */
2741 	d->bge_flags |= BGE_TXBDFLAG_END;
2742 
2743 	/* Set vlan tag to the first segment of the packet. */
2744 	d = &sc->bge_ldata.bge_tx_ring[*txidx];
2745 	if (m_head->m_flags & M_VLANTAG) {
2746 		d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2747 		d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag;
2748 	} else {
2749 		d->bge_vlan_tag = 0;
2750 	}
2751 
2752 	/*
2753 	 * Ensure that the map for this transmission is placed at
2754 	 * the array index of the last descriptor in this chain.
2755 	 */
2756 	sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
2757 	sc->bge_cdata.bge_tx_dmamap[idx] = map;
2758 	sc->bge_cdata.bge_tx_chain[idx] = m_head;
2759 	sc->bge_txcnt += ctx.bge_maxsegs;
2760 
2761 	BGE_INC(idx, BGE_TX_RING_CNT);
2762 	*txidx = idx;
2763 back:
2764 	if (error) {
2765 		m_freem(m_head);
2766 		*m_head0 = NULL;
2767 	}
2768 	return error;
2769 }
2770 
2771 /*
2772  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2773  * to the mbuf data regions directly in the transmit descriptors.
2774  */
2775 static void
2776 bge_start(struct ifnet *ifp)
2777 {
2778 	struct bge_softc *sc = ifp->if_softc;
2779 	struct mbuf *m_head = NULL;
2780 	uint32_t prodidx;
2781 	int need_trans;
2782 
2783 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2784 		return;
2785 
2786 	prodidx = sc->bge_tx_prodidx;
2787 
2788 	need_trans = 0;
2789 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2790 		m_head = ifq_dequeue(&ifp->if_snd, NULL);
2791 		if (m_head == NULL)
2792 			break;
2793 
2794 		/*
2795 		 * XXX
2796 		 * The code inside the if() block is never reached since we
2797 		 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting
2798 		 * requests to checksum TCP/UDP in a fragmented packet.
2799 		 *
2800 		 * XXX
2801 		 * safety overkill.  If this is a fragmented packet chain
2802 		 * with delayed TCP/UDP checksums, then only encapsulate
2803 		 * it if we have enough descriptors to handle the entire
2804 		 * chain at once.
2805 		 * (paranoia -- may not actually be needed)
2806 		 */
2807 		if ((m_head->m_flags & M_FIRSTFRAG) &&
2808 		    (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
2809 			if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2810 			    m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) {
2811 				ifp->if_flags |= IFF_OACTIVE;
2812 				ifq_prepend(&ifp->if_snd, m_head);
2813 				break;
2814 			}
2815 		}
2816 
2817 		/*
2818 		 * Sanity check: avoid coming within BGE_NSEG_RSVD
2819 		 * descriptors of the end of the ring.  Also make
2820 		 * sure there are BGE_NSEG_SPARE descriptors for
2821 		 * jumbo buffers' defragmentation.
2822 		 */
2823 		if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2824 		    (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) {
2825 			ifp->if_flags |= IFF_OACTIVE;
2826 			ifq_prepend(&ifp->if_snd, m_head);
2827 			break;
2828 		}
2829 
2830 		/*
2831 		 * Pack the data into the transmit ring. If we
2832 		 * don't have room, set the OACTIVE flag and wait
2833 		 * for the NIC to drain the ring.
2834 		 */
2835 		if (bge_encap(sc, &m_head, &prodidx)) {
2836 			ifp->if_flags |= IFF_OACTIVE;
2837 			break;
2838 		}
2839 		need_trans = 1;
2840 
2841 		ETHER_BPF_MTAP(ifp, m_head);
2842 	}
2843 
2844 	if (!need_trans)
2845 		return;
2846 
2847 	/* Transmit */
2848 	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2849 	/* 5700 b2 errata */
2850 	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2851 		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2852 
2853 	sc->bge_tx_prodidx = prodidx;
2854 
2855 	/*
2856 	 * Set a timeout in case the chip goes out to lunch.
2857 	 */
2858 	ifp->if_timer = 5;
2859 }
2860 
2861 static void
2862 bge_init(void *xsc)
2863 {
2864 	struct bge_softc *sc = xsc;
2865 	struct ifnet *ifp = &sc->arpcom.ac_if;
2866 	uint16_t *m;
2867 
2868 	ASSERT_SERIALIZED(ifp->if_serializer);
2869 
2870 	if (ifp->if_flags & IFF_RUNNING)
2871 		return;
2872 
2873 	/* Cancel pending I/O and flush buffers. */
2874 	bge_stop(sc);
2875 	bge_reset(sc);
2876 	bge_chipinit(sc);
2877 
2878 	/*
2879 	 * Init the various state machines, ring
2880 	 * control blocks and firmware.
2881 	 */
2882 	if (bge_blockinit(sc)) {
2883 		if_printf(ifp, "initialization failure\n");
2884 		return;
2885 	}
2886 
2887 	/* Specify MTU. */
2888 	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2889 	    ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN);
2890 
2891 	/* Load our MAC address. */
2892 	m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2893 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2894 	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2895 
2896 	/* Enable or disable promiscuous mode as needed. */
2897 	bge_setpromisc(sc);
2898 
2899 	/* Program multicast filter. */
2900 	bge_setmulti(sc);
2901 
2902 	/* Init RX ring. */
2903 	bge_init_rx_ring_std(sc);
2904 
2905 	/*
2906 	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2907 	 * memory to ensure that the chip has in fact read the first
2908 	 * entry of the ring.
2909 	 */
2910 	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2911 		uint32_t		v, i;
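		/*
		 * Offset 8 into the first std ring entry is apparently
		 * its len/flags word; once the chip has fetched the
		 * descriptor it should read back as the buffer length
		 * written by bge_newbuf_std().
		 */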
2912 		for (i = 0; i < 10; i++) {
2913 			DELAY(20);
2914 			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2915 			if (v == (MCLBYTES - ETHER_ALIGN))
2916 				break;
2917 		}
2918 		if (i == 10)
2919 			if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2920 	}
2921 
2922 	/* Init jumbo RX ring. */
2923 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2924 		bge_init_rx_ring_jumbo(sc);
2925 
2926 	/* Init our RX return ring index */
2927 	sc->bge_rx_saved_considx = 0;
2928 
2929 	/* Init TX ring. */
2930 	bge_init_tx_ring(sc);
2931 
2932 	/* Turn on transmitter */
2933 	BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2934 
2935 	/* Turn on receiver */
2936 	BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2937 
2938 	/* Tell firmware we're alive. */
2939 	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2940 
2941 	/* Enable host interrupts if polling(4) is not enabled. */
2942 	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2943 #ifdef DEVICE_POLLING
2944 	if (ifp->if_flags & IFF_POLLING)
2945 		bge_disable_intr(sc);
2946 	else
2947 #endif
2948 	bge_enable_intr(sc);
2949 
2950 	bge_ifmedia_upd(ifp);
2951 
2952 	ifp->if_flags |= IFF_RUNNING;
2953 	ifp->if_flags &= ~IFF_OACTIVE;
2954 
2955 	callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2956 }
2957 
2958 /*
2959  * Set media options.
2960  */
2961 static int
2962 bge_ifmedia_upd(struct ifnet *ifp)
2963 {
2964 	struct bge_softc *sc = ifp->if_softc;
2965 
2966 	/* If this is a 1000baseX NIC, enable the TBI port. */
2967 	if (sc->bge_flags & BGE_FLAG_TBI) {
2968 		struct ifmedia *ifm = &sc->bge_ifmedia;
2969 
2970 		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2971 			return(EINVAL);
2972 
2973 		switch(IFM_SUBTYPE(ifm->ifm_media)) {
2974 		case IFM_AUTO:
2975 			/*
2976 			 * The BCM5704 ASIC appears to have a special
2977 			 * mechanism for programming the autoneg
2978 			 * advertisement registers in TBI mode.
2979 			 */
2980 			if (!bge_fake_autoneg &&
2981 			    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
2982 				uint32_t sgdig;
2983 
2984 				CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
2985 				sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
2986 				sgdig |= BGE_SGDIGCFG_AUTO |
2987 					 BGE_SGDIGCFG_PAUSE_CAP |
2988 					 BGE_SGDIGCFG_ASYM_PAUSE;
2989 				CSR_WRITE_4(sc, BGE_SGDIG_CFG,
2990 					    sgdig | BGE_SGDIGCFG_SEND);
2991 				DELAY(5);
2992 				CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
2993 			}
2994 			break;
2995 		case IFM_1000_SX:
2996 			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2997 				BGE_CLRBIT(sc, BGE_MAC_MODE,
2998 				    BGE_MACMODE_HALF_DUPLEX);
2999 			} else {
3000 				BGE_SETBIT(sc, BGE_MAC_MODE,
3001 				    BGE_MACMODE_HALF_DUPLEX);
3002 			}
3003 			break;
3004 		default:
3005 			return(EINVAL);
3006 		}
3007 	} else {
3008 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3009 
3010 		sc->bge_link_evt++;
3011 		sc->bge_link = 0;
3012 		if (mii->mii_instance) {
3013 			struct mii_softc *miisc;
3014 
3015 			LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3016 				mii_phy_reset(miisc);
3017 		}
3018 		mii_mediachg(mii);
3019 	}
3020 	return(0);
3021 }
3022 
3023 /*
3024  * Report current media status.
3025  */
3026 static void
3027 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3028 {
3029 	struct bge_softc *sc = ifp->if_softc;
3030 
3031 	if (sc->bge_flags & BGE_FLAG_TBI) {
3032 		ifmr->ifm_status = IFM_AVALID;
3033 		ifmr->ifm_active = IFM_ETHER;
3034 		if (CSR_READ_4(sc, BGE_MAC_STS) &
3035 		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
3036 			ifmr->ifm_status |= IFM_ACTIVE;
3037 		} else {
3038 			ifmr->ifm_active |= IFM_NONE;
3039 			return;
3040 		}
3041 
3042 		ifmr->ifm_active |= IFM_1000_SX;
3043 		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3044 			ifmr->ifm_active |= IFM_HDX;
3045 		else
3046 			ifmr->ifm_active |= IFM_FDX;
3047 	} else {
3048 		struct mii_data *mii = device_get_softc(sc->bge_miibus);
3049 
3050 		mii_pollstat(mii);
3051 		ifmr->ifm_active = mii->mii_media_active;
3052 		ifmr->ifm_status = mii->mii_media_status;
3053 	}
3054 }
3055 
3056 static int
3057 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
3058 {
3059 	struct bge_softc *sc = ifp->if_softc;
3060 	struct ifreq *ifr = (struct ifreq *)data;
3061 	int mask, error = 0;
3062 
3063 	ASSERT_SERIALIZED(ifp->if_serializer);
3064 
3065 	switch (command) {
3066 	case SIOCSIFMTU:
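		/*
		 * A new MTU may switch between the standard and jumbo
		 * RX rings, so reprogram the chip with a full re-init.
		 */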
3067 		if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) ||
3068 		    (BGE_IS_JUMBO_CAPABLE(sc) &&
3069 		     ifr->ifr_mtu > BGE_JUMBO_MTU)) {
3070 			error = EINVAL;
3071 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
3072 			ifp->if_mtu = ifr->ifr_mtu;
3073 			ifp->if_flags &= ~IFF_RUNNING;
3074 			bge_init(sc);
3075 		}
3076 		break;
3077 	case SIOCSIFFLAGS:
3078 		if (ifp->if_flags & IFF_UP) {
3079 			if (ifp->if_flags & IFF_RUNNING) {
3080 				mask = ifp->if_flags ^ sc->bge_if_flags;
3081 
3082 				/*
3083 				 * If only the state of the PROMISC flag
3084 				 * changed, then just use the 'set promisc
3085 				 * mode' command instead of reinitializing
3086 				 * the entire NIC. Doing a full re-init
3087 				 * means reloading the firmware and waiting
3088 				 * for it to start up, which may take a
3089 				 * second or two.  Similarly for ALLMULTI.
3090 				 */
3091 				if (mask & IFF_PROMISC)
3092 					bge_setpromisc(sc);
3093 				if (mask & IFF_ALLMULTI)
3094 					bge_setmulti(sc);
3095 			} else {
3096 				bge_init(sc);
3097 			}
3098 		} else {
3099 			if (ifp->if_flags & IFF_RUNNING)
3100 				bge_stop(sc);
3101 		}
3102 		sc->bge_if_flags = ifp->if_flags;
3103 		break;
3104 	case SIOCADDMULTI:
3105 	case SIOCDELMULTI:
3106 		if (ifp->if_flags & IFF_RUNNING)
3107 			bge_setmulti(sc);
3108 		break;
3109 	case SIOCSIFMEDIA:
3110 	case SIOCGIFMEDIA:
3111 		if (sc->bge_flags & BGE_FLAG_TBI) {
3112 			error = ifmedia_ioctl(ifp, ifr,
3113 			    &sc->bge_ifmedia, command);
3114 		} else {
3115 			struct mii_data *mii;
3116 
3117 			mii = device_get_softc(sc->bge_miibus);
3118 			error = ifmedia_ioctl(ifp, ifr,
3119 					      &mii->mii_media, command);
3120 		}
3121 		break;
3122 	case SIOCSIFCAP:
3123 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3124 		if (mask & IFCAP_HWCSUM) {
3125 			ifp->if_capenable ^= IFCAP_HWCSUM;
3126 			if (IFCAP_HWCSUM & ifp->if_capenable)
3127 				ifp->if_hwassist = BGE_CSUM_FEATURES;
3128 			else
3129 				ifp->if_hwassist = 0;
3130 		}
3131 		break;
3132 	default:
3133 		error = ether_ioctl(ifp, command, data);
3134 		break;
3135 	}
3136 	return error;
3137 }
3138 
3139 static void
3140 bge_watchdog(struct ifnet *ifp)
3141 {
3142 	struct bge_softc *sc = ifp->if_softc;
3143 
3144 	if_printf(ifp, "watchdog timeout -- resetting\n");
3145 
3146 	ifp->if_flags &= ~IFF_RUNNING;
3147 	bge_init(sc);
3148 
3149 	ifp->if_oerrors++;
3150 
3151 	if (!ifq_is_empty(&ifp->if_snd))
3152 		if_devstart(ifp);
3153 }
3154 
3155 /*
3156  * Stop the adapter and free any mbufs allocated to the
3157  * RX and TX lists.
3158  */
3159 static void
3160 bge_stop(struct bge_softc *sc)
3161 {
3162 	struct ifnet *ifp = &sc->arpcom.ac_if;
3163 	struct ifmedia_entry *ifm;
3164 	struct mii_data *mii = NULL;
3165 	int mtmp, itmp;
3166 
3167 	ASSERT_SERIALIZED(ifp->if_serializer);
3168 
3169 	if ((sc->bge_flags & BGE_FLAG_TBI) == 0)
3170 		mii = device_get_softc(sc->bge_miibus);
3171 
3172 	callout_stop(&sc->bge_stat_timer);
3173 
3174 	/*
3175 	 * Disable all of the receiver blocks
3176 	 */
3177 	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3178 	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3179 	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3180 	if (!BGE_IS_5705_PLUS(sc))
3181 		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3182 	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3183 	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3184 	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3185 
3186 	/*
3187 	 * Disable all of the transmit blocks
3188 	 */
3189 	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3190 	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3191 	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3192 	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3193 	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3194 	if (!BGE_IS_5705_PLUS(sc))
3195 		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3196 	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3197 
3198 	/*
3199 	 * Shut down all of the memory managers and related
3200 	 * state machines.
3201 	 */
3202 	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3203 	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3204 	if (!BGE_IS_5705_PLUS(sc))
3205 		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3206 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3207 	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3208 	if (!BGE_IS_5705_PLUS(sc)) {
3209 		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3210 		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3211 	}
3212 
3213 	/* Disable host interrupts. */
3214 	bge_disable_intr(sc);
3215 
3216 	/*
3217 	 * Tell firmware we're shutting down.
3218 	 */
3219 	BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3220 
3221 	/* Free the RX lists. */
3222 	bge_free_rx_ring_std(sc);
3223 
3224 	/* Free jumbo RX list. */
3225 	if (BGE_IS_JUMBO_CAPABLE(sc))
3226 		bge_free_rx_ring_jumbo(sc);
3227 
3228 	/* Free TX buffers. */
3229 	bge_free_tx_ring(sc);
3230 
3231 	/*
3232 	 * Isolate/power down the PHY, but leave the media selection
3233 	 * unchanged so that things will be put back to normal when
3234 	 * we bring the interface back up.
3235 	 *
3236 	 * 'mii' may be NULL in the following cases:
3237 	 * - The device uses TBI.
3238 	 * - bge_stop() is called by bge_detach().
3239 	 */
3240 	if (mii != NULL) {
3241 		itmp = ifp->if_flags;
3242 		ifp->if_flags |= IFF_UP;
3243 		ifm = mii->mii_media.ifm_cur;
3244 		mtmp = ifm->ifm_media;
3245 		ifm->ifm_media = IFM_ETHER|IFM_NONE;
3246 		mii_mediachg(mii);
3247 		ifm->ifm_media = mtmp;
3248 		ifp->if_flags = itmp;
3249 	}
3250 
3251 	sc->bge_link = 0;
3252 	sc->bge_coal_chg = 0;
3253 
3254 	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3255 
3256 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3257 	ifp->if_timer = 0;
3258 }
3259 
3260 /*
3261  * Stop all chip I/O so that the kernel's probe routines don't
3262  * get confused by errant DMAs when rebooting.
3263  */
3264 static void
3265 bge_shutdown(device_t dev)
3266 {
3267 	struct bge_softc *sc = device_get_softc(dev);
3268 	struct ifnet *ifp = &sc->arpcom.ac_if;
3269 
3270 	lwkt_serialize_enter(ifp->if_serializer);
3271 	bge_stop(sc);
3272 	bge_reset(sc);
3273 	lwkt_serialize_exit(ifp->if_serializer);
3274 }
3275 
3276 static int
3277 bge_suspend(device_t dev)
3278 {
3279 	struct bge_softc *sc = device_get_softc(dev);
3280 	struct ifnet *ifp = &sc->arpcom.ac_if;
3281 
3282 	lwkt_serialize_enter(ifp->if_serializer);
3283 	bge_stop(sc);
3284 	lwkt_serialize_exit(ifp->if_serializer);
3285 
3286 	return 0;
3287 }
3288 
3289 static int
3290 bge_resume(device_t dev)
3291 {
3292 	struct bge_softc *sc = device_get_softc(dev);
3293 	struct ifnet *ifp = &sc->arpcom.ac_if;
3294 
3295 	lwkt_serialize_enter(ifp->if_serializer);
3296 
3297 	if (ifp->if_flags & IFF_UP) {
3298 		bge_init(sc);
3299 
3300 		if (!ifq_is_empty(&ifp->if_snd))
3301 			if_devstart(ifp);
3302 	}
3303 
3304 	lwkt_serialize_exit(ifp->if_serializer);
3305 
3306 	return 0;
3307 }
3308 
3309 static void
3310 bge_setpromisc(struct bge_softc *sc)
3311 {
3312 	struct ifnet *ifp = &sc->arpcom.ac_if;
3313 
3314 	if (ifp->if_flags & IFF_PROMISC)
3315 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3316 	else
3317 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3318 }
3319 
3320 static void
3321 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3322 {
3323 	struct bge_dmamap_arg *ctx = arg;
3324 
3325 	if (error)
3326 		return;
3327 
3328 	KASSERT(nsegs == 1 && ctx->bge_maxsegs == 1,
3329 		("only one segment is allowed\n"));
3330 
3331 	ctx->bge_segs[0] = *segs;
3332 }
3333 
3334 static void
3335 bge_dma_map_mbuf(void *arg, bus_dma_segment_t *segs, int nsegs,
3336 		 bus_size_t mapsz __unused, int error)
3337 {
3338 	struct bge_dmamap_arg *ctx = arg;
3339 	int i;
3340 
3341 	if (error)
3342 		return;
3343 
3344 	if (nsegs > ctx->bge_maxsegs) {
3345 		ctx->bge_maxsegs = 0;
3346 		return;
3347 	}
3348 
3349 	ctx->bge_maxsegs = nsegs;
3350 	for (i = 0; i < nsegs; ++i)
3351 		ctx->bge_segs[i] = segs[i];
3352 }
3353 
static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy RX/TX mbuf DMA maps and their tag. */
	if (sc->bge_cdata.bge_mtag != NULL) {
		for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
			if (sc->bge_cdata.bge_rx_std_dmamap[i]) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
				    sc->bge_cdata.bge_rx_std_dmamap[i]);
			}
		}

		for (i = 0; i < BGE_TX_RING_CNT; i++) {
			if (sc->bge_cdata.bge_tx_dmamap[i]) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
				    sc->bge_cdata.bge_tx_dmamap[i]);
			}
		}
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
	}

	/* Destroy standard RX ring */
	bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag,
			   sc->bge_cdata.bge_rx_std_ring_map,
			   sc->bge_ldata.bge_rx_std_ring);

	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_jumbo_mem(sc);

	/* Destroy RX return ring */
	bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag,
			   sc->bge_cdata.bge_rx_return_ring_map,
			   sc->bge_ldata.bge_rx_return_ring);

	/* Destroy TX ring */
	bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag,
			   sc->bge_cdata.bge_tx_ring_map,
			   sc->bge_ldata.bge_tx_ring);

	/* Destroy status block */
	bge_dma_block_free(sc->bge_cdata.bge_status_tag,
			   sc->bge_cdata.bge_status_map,
			   sc->bge_ldata.bge_status_block);

	/* Destroy statistics block */
	bge_dma_block_free(sc->bge_cdata.bge_stats_tag,
			   sc->bge_cdata.bge_stats_map,
			   sc->bge_ldata.bge_stats);

	/* Destroy the parent tag */
	if (sc->bge_cdata.bge_parent_tag != NULL)
		bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
}

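/*
 * Allocate all DMA resources used by the driver: the parent tag,
 * the mbuf tag and maps, and the DMA'able blocks backing the RX/TX
 * rings, the status block and the statistics block.
 */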
static int
bge_dma_alloc(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int nseg, i, error;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE, BGE_NSEG_NEW,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->bge_cdata.bge_parent_tag);
	if (error) {
		if_printf(ifp, "could not allocate parent dma tag\n");
		return error;
	}

	/*
	 * Create DMA tag for mbufs.
	 */
	nseg = BGE_NSEG_NEW;
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES * nseg, nseg, MCLBYTES,
				   BUS_DMA_ALLOCNOW, &sc->bge_cdata.bge_mtag);
	if (error) {
		if_printf(ifp, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX/RX mbufs.
	 */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
					  &sc->bge_cdata.bge_rx_std_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
			sc->bge_cdata.bge_mtag = NULL;

			if_printf(ifp, "could not create DMA map for RX\n");
			return error;
		}
	}

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
					  &sc->bge_cdata.bge_tx_dmamap[i]);
		if (error) {
			int j;

			for (j = 0; j < BGE_STD_RX_RING_CNT; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_rx_std_dmamap[j]);
			}
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
					sc->bge_cdata.bge_tx_dmamap[j]);
			}
			bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
			sc->bge_cdata.bge_mtag = NULL;

			if_printf(ifp, "could not create DMA map for TX\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the standard RX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ,
				    &sc->bge_cdata.bge_rx_std_ring_tag,
				    &sc->bge_cdata.bge_rx_std_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_std_ring,
				    &sc->bge_ldata.bge_rx_std_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create std RX ring\n");
		return error;
	}

	/*
	 * Create jumbo buffer pool.
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		error = bge_alloc_jumbo_mem(sc);
		if (error) {
			if_printf(ifp, "could not create jumbo buffer pool\n");
			return error;
		}
	}

	/*
	 * Create DMA resources for the RX return ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_RX_RTN_RING_SZ(sc),
				    &sc->bge_cdata.bge_rx_return_ring_tag,
				    &sc->bge_cdata.bge_rx_return_ring_map,
				    (void **)&sc->bge_ldata.bge_rx_return_ring,
				    &sc->bge_ldata.bge_rx_return_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create RX ret ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the TX ring.
	 */
	error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ,
				    &sc->bge_cdata.bge_tx_ring_tag,
				    &sc->bge_cdata.bge_tx_ring_map,
				    (void **)&sc->bge_ldata.bge_tx_ring,
				    &sc->bge_ldata.bge_tx_ring_paddr);
	if (error) {
		if_printf(ifp, "could not create TX ring\n");
		return error;
	}

	/*
	 * Create DMA resources for the status block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ,
				    &sc->bge_cdata.bge_status_tag,
				    &sc->bge_cdata.bge_status_map,
				    (void **)&sc->bge_ldata.bge_status_block,
				    &sc->bge_ldata.bge_status_block_paddr);
	if (error) {
		if_printf(ifp, "could not create status block\n");
		return error;
	}

	/*
	 * Create DMA resources for the statistics block.
	 */
	error = bge_dma_block_alloc(sc, BGE_STATS_SZ,
				    &sc->bge_cdata.bge_stats_tag,
				    &sc->bge_cdata.bge_stats_map,
				    (void **)&sc->bge_ldata.bge_stats,
				    &sc->bge_ldata.bge_stats_paddr);
	if (error) {
		if_printf(ifp, "could not create stats block\n");
		return error;
	}
	return 0;
}

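/*
 * Helper to create a tag for, allocate and load one physically
 * contiguous DMA block; returns both the kernel virtual address
 * and the bus address of the block.
 */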
static int
bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag,
		    bus_dmamap_t *map, void **addr, bus_addr_t *paddr)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct bge_dmamap_arg ctx;
	bus_dma_segment_t seg;
	int error;

	/*
	 * Create DMA tag
	 */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL, size, 1, size, 0, tag);
	if (error) {
		if_printf(ifp, "could not allocate dma tag\n");
		return error;
	}

	/*
	 * Allocate DMA'able memory
	 */
	error = bus_dmamem_alloc(*tag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 map);
	if (error) {
		if_printf(ifp, "could not allocate dma memory\n");
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
		return error;
	}

	/*
	 * Load the DMA'able memory
	 */
	ctx.bge_maxsegs = 1;
	ctx.bge_segs = &seg;
	error = bus_dmamap_load(*tag, *map, *addr, size, bge_dma_map_addr, &ctx,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(ifp, "could not load dma memory\n");
		bus_dmamem_free(*tag, *addr, *map);
		bus_dma_tag_destroy(*tag);
		*tag = NULL;
		return error;
	}
	*paddr = ctx.bge_segs[0].ds_addr;

	return 0;
}

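/*
 * Counterpart of bge_dma_block_alloc(): unload, free and destroy
 * one DMA block.  A NULL tag means the block was never allocated.
 */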
static void
bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr)
{
	if (tag != NULL) {
		bus_dmamap_unload(tag, map);
		bus_dmamem_free(tag, addr, map);
		bus_dma_tag_destroy(tag);
	}
}

/*
 * Grrr. The link status word in the status block does
 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information. Hence, we have
 * to enable MII interrupts in order to properly obtain
 * async link changes. Unfortunately, this also means that
 * we have to read the MAC status register to detect link
 * changes, thereby adding an additional register access to
 * the interrupt handler.
 *
 * XXX: perhaps the link state detection procedure used for
 * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700 revisions.
 */
static void
bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bge_miibus);

	mii_pollstat(mii);

	if (!sc->bge_link &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->bge_link++;
		if (bootverbose)
			if_printf(ifp, "link UP\n");
	} else if (sc->bge_link &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
		sc->bge_link = 0;
		if (bootverbose)
			if_printf(ifp, "link DOWN\n");
	}

	/* Clear the interrupt. */
	CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT);
	bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
	bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS);
}

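/*
 * Link state handler for fiber NICs operating in TBI mode; the
 * link state is derived from the MAC status word passed in by
 * the interrupt handler.
 */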
static void
bge_tbi_link_upd(struct bge_softc *sc, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

#define PCS_ENCODE_ERR	(BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE)

	/*
	 * Sometimes PCS encoding errors are detected in
	 * TBI mode (on fiber NICs), and for some reason
	 * the chip will signal them as link changes.
	 * If we get a link change event, but the 'PCS
	 * encoding error' bit in the MAC status register
	 * is set, don't bother doing a link check.
	 * This avoids spurious "gigabit link up" messages
	 * that sometimes appear on fiber NICs during
	 * periods of heavy traffic.
	 */
	if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
		if (!sc->bge_link) {
			sc->bge_link++;
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_TBI_SEND_CFGS);
			}
			CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);

			if (bootverbose)
				if_printf(ifp, "link UP\n");

			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) {
		if (sc->bge_link) {
			sc->bge_link = 0;

			if (bootverbose)
				if_printf(ifp, "link DOWN\n");

			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
		}
	}

#undef PCS_ENCODE_ERR

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

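/*
 * Link state handler for copper NICs whose PHY is polled
 * automatically by the chip (MI autopoll).
 */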
static void
bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused)
{
	/*
	 * Check that the AUTOPOLL bit is set before
	 * processing the event as a real link change.
	 * Turning AUTOPOLL on and off in the MII read/write
	 * functions will often trigger a link status
	 * interrupt for no reason.
	 */
	if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		struct mii_data *mii = device_get_softc(sc->bge_miibus);

		mii_pollstat(mii);

		if (!sc->bge_link &&
		    (mii->mii_media_status & IFM_ACTIVE) &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bge_link++;
			if (bootverbose)
				if_printf(ifp, "link UP\n");
		} else if (sc->bge_link &&
		    (!(mii->mii_media_status & IFM_ACTIVE) ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(ifp, "link DOWN\n");
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

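/*
 * The following four sysctl handlers are thin wrappers around
 * bge_sysctl_coal_chg(), each passing the matching coalescing
 * parameter and its change flag.
 */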
static int
bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_rx_coal_ticks,
				   BGE_RX_COAL_TICKS_CHG);
}

static int
bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_tx_coal_ticks,
				   BGE_TX_COAL_TICKS_CHG);
}

static int
bge_sysctl_rx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_rx_max_coal_bds,
				   BGE_RX_MAX_COAL_BDS_CHG);
}

static int
bge_sysctl_tx_max_coal_bds(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc = arg1;

	return bge_sysctl_coal_chg(oidp, arg1, arg2, req,
				   &sc->bge_tx_max_coal_bds,
				   BGE_TX_MAX_COAL_BDS_CHG);
}

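/*
 * Common backend for the coalescing sysctls: validate the new
 * value, store it and record which parameter changed, so that
 * bge_coal_change() can program it into the chip later.
 */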
static int
bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal,
		    uint32_t coal_chg_mask)
{
	struct bge_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = *coal;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (!error && req->newptr != NULL) {
		if (v < 0) {
			error = EINVAL;
		} else {
			*coal = v;
			sc->bge_coal_chg |= coal_chg_mask;
		}
	}

	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

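/*
 * Program any changed coalescing parameters into the host
 * coalescing engine.  Each write is followed by a short delay
 * and a read-back of the register.
 */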
static void
bge_coal_change(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS,
			    sc->bge_rx_coal_ticks);
		DELAY(10);
		CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);	/* flush the write */

		if (bootverbose) {
			if_printf(ifp, "rx_coal_ticks -> %u\n",
				  sc->bge_rx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS,
			    sc->bge_tx_coal_ticks);
		DELAY(10);
		CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS);	/* flush the write */

		if (bootverbose) {
			if_printf(ifp, "tx_coal_ticks -> %u\n",
				  sc->bge_tx_coal_ticks);
		}
	}

	if (sc->bge_coal_chg & BGE_RX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS,
			    sc->bge_rx_max_coal_bds);
		DELAY(10);
		CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); /* flush the write */

		if (bootverbose) {
			if_printf(ifp, "rx_max_coal_bds -> %u\n",
				  sc->bge_rx_max_coal_bds);
		}
	}

	if (sc->bge_coal_chg & BGE_TX_MAX_COAL_BDS_CHG) {
		CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS,
			    sc->bge_tx_max_coal_bds);
		DELAY(10);
		CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS); /* flush the write */

		if (bootverbose) {
			if_printf(ifp, "tx_max_coal_bds -> %u\n",
				  sc->bge_tx_max_coal_bds);
		}
	}

	sc->bge_coal_chg = 0;
}

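/*
 * Re-enable the interrupt handler and unmask the chip interrupt,
 * e.g. when polling is turned off.
 */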
static void
bge_enable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_handler_enable(ifp->if_serializer);

	/*
	 * Enable interrupt.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);

	/*
	 * Trigger another interrupt, since the above write to
	 * interrupt mailbox 0 may have acknowledged a pending
	 * interrupt.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
}

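/*
 * Mask the chip interrupt and disable the interrupt handler,
 * e.g. while the interface is being polled.
 */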
static void
bge_disable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Mask the interrupt when we start polling.
	 */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);

	/*
	 * Acknowledge possible asserted interrupt.
	 */
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	lwkt_serialize_handler_disable(ifp->if_serializer);
}
3923