/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.
This seems to imply 68 * that these features are simply not available on the BCM5701. As a 69 * result, this driver does not implement any support for the mini RX 70 * ring. 71 */ 72 73 #include "opt_polling.h" 74 75 #include <sys/param.h> 76 #include <sys/bus.h> 77 #include <sys/endian.h> 78 #include <sys/kernel.h> 79 #include <sys/ktr.h> 80 #include <sys/interrupt.h> 81 #include <sys/mbuf.h> 82 #include <sys/malloc.h> 83 #include <sys/queue.h> 84 #include <sys/rman.h> 85 #include <sys/serialize.h> 86 #include <sys/socket.h> 87 #include <sys/sockio.h> 88 #include <sys/sysctl.h> 89 90 #include <net/bpf.h> 91 #include <net/ethernet.h> 92 #include <net/if.h> 93 #include <net/if_arp.h> 94 #include <net/if_dl.h> 95 #include <net/if_media.h> 96 #include <net/if_types.h> 97 #include <net/ifq_var.h> 98 #include <net/vlan/if_vlan_var.h> 99 #include <net/vlan/if_vlan_ether.h> 100 101 #include <dev/netif/mii_layer/mii.h> 102 #include <dev/netif/mii_layer/miivar.h> 103 #include <dev/netif/mii_layer/brgphyreg.h> 104 105 #include <bus/pci/pcidevs.h> 106 #include <bus/pci/pcireg.h> 107 #include <bus/pci/pcivar.h> 108 109 #include <dev/netif/bge/if_bgereg.h> 110 #include <dev/netif/bge/if_bgevar.h> 111 112 /* "device miibus" required. See GENERIC if you get errors here. */ 113 #include "miibus_if.h" 114 115 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) 116 117 static const struct bge_type { 118 uint16_t bge_vid; 119 uint16_t bge_did; 120 char *bge_name; 121 } bge_devs[] = { 122 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996, 123 "3COM 3C996 Gigabit Ethernet" }, 124 125 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700, 126 "Alteon BCM5700 Gigabit Ethernet" }, 127 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701, 128 "Alteon BCM5701 Gigabit Ethernet" }, 129 130 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000, 131 "Altima AC1000 Gigabit Ethernet" }, 132 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001, 133 "Altima AC1002 Gigabit Ethernet" }, 134 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100, 135 "Altima AC9100 Gigabit Ethernet" }, 136 137 { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701, 138 "Apple BCM5701 Gigabit Ethernet" }, 139 140 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700, 141 "Broadcom BCM5700 Gigabit Ethernet" }, 142 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701, 143 "Broadcom BCM5701 Gigabit Ethernet" }, 144 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702, 145 "Broadcom BCM5702 Gigabit Ethernet" }, 146 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X, 147 "Broadcom BCM5702X Gigabit Ethernet" }, 148 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT, 149 "Broadcom BCM5702 Gigabit Ethernet" }, 150 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703, 151 "Broadcom BCM5703 Gigabit Ethernet" }, 152 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X, 153 "Broadcom BCM5703X Gigabit Ethernet" }, 154 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3, 155 "Broadcom BCM5703 Gigabit Ethernet" }, 156 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C, 157 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 158 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S, 159 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 160 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT, 161 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 162 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705, 163 "Broadcom BCM5705 Gigabit Ethernet" }, 164 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F, 165 "Broadcom BCM5705F Gigabit Ethernet" }, 166 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K, 167 "Broadcom 
BCM5705K Gigabit Ethernet" }, 168 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M, 169 "Broadcom BCM5705M Gigabit Ethernet" }, 170 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 171 "Broadcom BCM5705M Gigabit Ethernet" }, 172 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714, 173 "Broadcom BCM5714C Gigabit Ethernet" }, 174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S, 175 "Broadcom BCM5714S Gigabit Ethernet" }, 176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715, 177 "Broadcom BCM5715 Gigabit Ethernet" }, 178 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S, 179 "Broadcom BCM5715S Gigabit Ethernet" }, 180 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720, 181 "Broadcom BCM5720 Gigabit Ethernet" }, 182 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721, 183 "Broadcom BCM5721 Gigabit Ethernet" }, 184 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722, 185 "Broadcom BCM5722 Gigabit Ethernet" }, 186 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723, 187 "Broadcom BCM5723 Gigabit Ethernet" }, 188 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750, 189 "Broadcom BCM5750 Gigabit Ethernet" }, 190 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M, 191 "Broadcom BCM5750M Gigabit Ethernet" }, 192 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751, 193 "Broadcom BCM5751 Gigabit Ethernet" }, 194 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F, 195 "Broadcom BCM5751F Gigabit Ethernet" }, 196 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M, 197 "Broadcom BCM5751M Gigabit Ethernet" }, 198 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752, 199 "Broadcom BCM5752 Gigabit Ethernet" }, 200 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M, 201 "Broadcom BCM5752M Gigabit Ethernet" }, 202 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753, 203 "Broadcom BCM5753 Gigabit Ethernet" }, 204 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F, 205 "Broadcom BCM5753F Gigabit Ethernet" }, 206 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M, 207 "Broadcom BCM5753M Gigabit Ethernet" }, 208 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754, 209 "Broadcom BCM5754 Gigabit Ethernet" }, 210 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M, 211 "Broadcom BCM5754M Gigabit Ethernet" }, 212 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755, 213 "Broadcom BCM5755 Gigabit Ethernet" }, 214 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M, 215 "Broadcom BCM5755M Gigabit Ethernet" }, 216 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756, 217 "Broadcom BCM5756 Gigabit Ethernet" }, 218 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761, 219 "Broadcom BCM5761 Gigabit Ethernet" }, 220 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E, 221 "Broadcom BCM5761E Gigabit Ethernet" }, 222 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S, 223 "Broadcom BCM5761S Gigabit Ethernet" }, 224 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE, 225 "Broadcom BCM5761SE Gigabit Ethernet" }, 226 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764, 227 "Broadcom BCM5764 Gigabit Ethernet" }, 228 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780, 229 "Broadcom BCM5780 Gigabit Ethernet" }, 230 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S, 231 "Broadcom BCM5780S Gigabit Ethernet" }, 232 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781, 233 "Broadcom BCM5781 Gigabit Ethernet" }, 234 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782, 235 "Broadcom BCM5782 Gigabit Ethernet" }, 236 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784, 237 "Broadcom BCM5784 
Gigabit Ethernet" }, 238 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F, 239 "Broadcom BCM5785F Gigabit Ethernet" }, 240 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G, 241 "Broadcom BCM5785G Gigabit Ethernet" }, 242 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786, 243 "Broadcom BCM5786 Gigabit Ethernet" }, 244 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787, 245 "Broadcom BCM5787 Gigabit Ethernet" }, 246 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F, 247 "Broadcom BCM5787F Gigabit Ethernet" }, 248 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M, 249 "Broadcom BCM5787M Gigabit Ethernet" }, 250 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788, 251 "Broadcom BCM5788 Gigabit Ethernet" }, 252 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789, 253 "Broadcom BCM5789 Gigabit Ethernet" }, 254 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901, 255 "Broadcom BCM5901 Fast Ethernet" }, 256 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2, 257 "Broadcom BCM5901A2 Fast Ethernet" }, 258 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M, 259 "Broadcom BCM5903M Fast Ethernet" }, 260 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906, 261 "Broadcom BCM5906 Fast Ethernet"}, 262 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M, 263 "Broadcom BCM5906M Fast Ethernet"}, 264 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760, 265 "Broadcom BCM57760 Gigabit Ethernet"}, 266 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780, 267 "Broadcom BCM57780 Gigabit Ethernet"}, 268 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788, 269 "Broadcom BCM57788 Gigabit Ethernet"}, 270 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790, 271 "Broadcom BCM57790 Gigabit Ethernet"}, 272 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 273 "SysKonnect Gigabit Ethernet" }, 274 275 { 0, 0, NULL } 276 }; 277 278 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 279 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 280 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 281 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 282 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 283 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) 284 #define BGE_IS_5788(sc) ((sc)->bge_flags & BGE_FLAG_5788) 285 286 #define BGE_IS_CRIPPLED(sc) \ 287 (BGE_IS_5788((sc)) || (sc)->bge_asicrev == BGE_ASICREV_BCM5700) 288 289 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 290 291 static int bge_probe(device_t); 292 static int bge_attach(device_t); 293 static int bge_detach(device_t); 294 static void bge_txeof(struct bge_softc *, uint16_t); 295 static void bge_rxeof(struct bge_softc *, uint16_t); 296 297 static void bge_tick(void *); 298 static void bge_stats_update(struct bge_softc *); 299 static void bge_stats_update_regs(struct bge_softc *); 300 static struct mbuf * 301 bge_defrag_shortdma(struct mbuf *); 302 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 303 304 #ifdef DEVICE_POLLING 305 static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 306 #endif 307 static void bge_intr_crippled(void *); 308 static void bge_intr_legacy(void *); 309 static void bge_msi(void *); 310 static void bge_msi_oneshot(void *); 311 static void bge_intr(struct bge_softc *); 312 static void bge_enable_intr(struct bge_softc *); 313 static void bge_disable_intr(struct bge_softc *); 314 static void bge_start(struct ifnet *); 315 static int 
bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 316 static void bge_init(void *); 317 static void bge_stop(struct bge_softc *); 318 static void bge_watchdog(struct ifnet *); 319 static void bge_shutdown(device_t); 320 static int bge_suspend(device_t); 321 static int bge_resume(device_t); 322 static int bge_ifmedia_upd(struct ifnet *); 323 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 324 325 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 326 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 327 328 static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *); 329 static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t); 330 331 static void bge_setmulti(struct bge_softc *); 332 static void bge_setpromisc(struct bge_softc *); 333 static void bge_enable_msi(struct bge_softc *sc); 334 335 static int bge_alloc_jumbo_mem(struct bge_softc *); 336 static void bge_free_jumbo_mem(struct bge_softc *); 337 static struct bge_jslot 338 *bge_jalloc(struct bge_softc *); 339 static void bge_jfree(void *); 340 static void bge_jref(void *); 341 static int bge_newbuf_std(struct bge_softc *, int, int); 342 static int bge_newbuf_jumbo(struct bge_softc *, int, int); 343 static void bge_setup_rxdesc_std(struct bge_softc *, int); 344 static void bge_setup_rxdesc_jumbo(struct bge_softc *, int); 345 static int bge_init_rx_ring_std(struct bge_softc *); 346 static void bge_free_rx_ring_std(struct bge_softc *); 347 static int bge_init_rx_ring_jumbo(struct bge_softc *); 348 static void bge_free_rx_ring_jumbo(struct bge_softc *); 349 static void bge_free_tx_ring(struct bge_softc *); 350 static int bge_init_tx_ring(struct bge_softc *); 351 352 static int bge_chipinit(struct bge_softc *); 353 static int bge_blockinit(struct bge_softc *); 354 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); 355 356 static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t); 357 static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t); 358 #ifdef notdef 359 static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t); 360 #endif 361 static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t); 362 static void bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t); 363 static void bge_writembx(struct bge_softc *, int, int); 364 365 static int bge_miibus_readreg(device_t, int, int); 366 static int bge_miibus_writereg(device_t, int, int, int); 367 static void bge_miibus_statchg(device_t); 368 static void bge_bcm5700_link_upd(struct bge_softc *, uint32_t); 369 static void bge_tbi_link_upd(struct bge_softc *, uint32_t); 370 static void bge_copper_link_upd(struct bge_softc *, uint32_t); 371 static void bge_autopoll_link_upd(struct bge_softc *, uint32_t); 372 static void bge_link_poll(struct bge_softc *); 373 374 static void bge_reset(struct bge_softc *); 375 376 static int bge_dma_alloc(struct bge_softc *); 377 static void bge_dma_free(struct bge_softc *); 378 static int bge_dma_block_alloc(struct bge_softc *, bus_size_t, 379 bus_dma_tag_t *, bus_dmamap_t *, 380 void **, bus_addr_t *); 381 static void bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 382 383 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 384 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 385 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 386 static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 387 388 static void bge_coal_change(struct bge_softc *); 389 static int 
bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 390 static int bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 391 static int bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 392 static int bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 393 static int bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS); 394 static int bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS); 395 static int bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 396 static int bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 397 static int bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 398 int, int, uint32_t); 399 400 /* 401 * Set following tunable to 1 for some IBM blade servers with the DNLK 402 * switch module. Auto negotiation is broken for those configurations. 403 */ 404 static int bge_fake_autoneg = 0; 405 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg); 406 407 static int bge_msi_enable = 1; 408 TUNABLE_INT("hw.bge.msi.enable", &bge_msi_enable); 409 410 #if !defined(KTR_IF_BGE) 411 #define KTR_IF_BGE KTR_ALL 412 #endif 413 KTR_INFO_MASTER(if_bge); 414 KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr"); 415 KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt"); 416 KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt"); 417 #define logif(name) KTR_LOG(if_bge_ ## name) 418 419 static device_method_t bge_methods[] = { 420 /* Device interface */ 421 DEVMETHOD(device_probe, bge_probe), 422 DEVMETHOD(device_attach, bge_attach), 423 DEVMETHOD(device_detach, bge_detach), 424 DEVMETHOD(device_shutdown, bge_shutdown), 425 DEVMETHOD(device_suspend, bge_suspend), 426 DEVMETHOD(device_resume, bge_resume), 427 428 /* bus interface */ 429 DEVMETHOD(bus_print_child, bus_generic_print_child), 430 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 431 432 /* MII interface */ 433 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 434 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 435 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 436 437 { 0, 0 } 438 }; 439 440 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc)); 441 static devclass_t bge_devclass; 442 443 DECLARE_DUMMY_MODULE(if_bge); 444 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL); 445 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL); 446 447 static uint32_t 448 bge_readmem_ind(struct bge_softc *sc, uint32_t off) 449 { 450 device_t dev = sc->bge_dev; 451 uint32_t val; 452 453 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 454 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 455 return 0; 456 457 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 458 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 459 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 460 return (val); 461 } 462 463 static void 464 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val) 465 { 466 device_t dev = sc->bge_dev; 467 468 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 469 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 470 return; 471 472 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 473 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 474 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 475 } 476 477 #ifdef notdef 478 static uint32_t 479 bge_readreg_ind(struct bge_softc *sc, uin32_t off) 480 { 481 device_t dev = sc->bge_dev; 482 483 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 484 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 485 } 486 #endif 487 488 static void 489 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val) 490 { 491 device_t dev = sc->bge_dev; 492 493 
	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
	if (sc->bge_mbox_reorder)
		CSR_READ_4(sc, off);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	/* Compare against the loop bound, so a timeout is actually reported. */
	if (i == BGE_TIMEOUT * 10) {
		if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
634 */ 635 static int 636 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len) 637 { 638 size_t i; 639 int err; 640 uint8_t byte; 641 642 for (byte = 0, err = 0, i = 0; i < len; i++) { 643 err = bge_eeprom_getbyte(sc, off + i, &byte); 644 if (err) 645 break; 646 *(dest + i) = byte; 647 } 648 649 return(err ? 1 : 0); 650 } 651 652 static int 653 bge_miibus_readreg(device_t dev, int phy, int reg) 654 { 655 struct bge_softc *sc = device_get_softc(dev); 656 uint32_t val; 657 int i; 658 659 KASSERT(phy == sc->bge_phyno, 660 ("invalid phyno %d, should be %d", phy, sc->bge_phyno)); 661 662 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 663 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 664 CSR_WRITE_4(sc, BGE_MI_MODE, 665 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); 666 DELAY(80); 667 } 668 669 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 670 BGE_MIPHY(phy) | BGE_MIREG(reg)); 671 672 /* Poll for the PHY register access to complete. */ 673 for (i = 0; i < BGE_TIMEOUT; i++) { 674 DELAY(10); 675 val = CSR_READ_4(sc, BGE_MI_COMM); 676 if ((val & BGE_MICOMM_BUSY) == 0) { 677 DELAY(5); 678 val = CSR_READ_4(sc, BGE_MI_COMM); 679 break; 680 } 681 } 682 if (i == BGE_TIMEOUT) { 683 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 684 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 685 val = 0; 686 } 687 688 /* Restore the autopoll bit if necessary. */ 689 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 690 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 691 DELAY(80); 692 } 693 694 if (val & BGE_MICOMM_READFAIL) 695 return 0; 696 697 return (val & 0xFFFF); 698 } 699 700 static int 701 bge_miibus_writereg(device_t dev, int phy, int reg, int val) 702 { 703 struct bge_softc *sc = device_get_softc(dev); 704 int i; 705 706 KASSERT(phy == sc->bge_phyno, 707 ("invalid phyno %d, should be %d", phy, sc->bge_phyno)); 708 709 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 710 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) 711 return 0; 712 713 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 714 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 715 CSR_WRITE_4(sc, BGE_MI_MODE, 716 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); 717 DELAY(80); 718 } 719 720 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 721 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 722 723 for (i = 0; i < BGE_TIMEOUT; i++) { 724 DELAY(10); 725 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 726 DELAY(5); 727 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 728 break; 729 } 730 } 731 if (i == BGE_TIMEOUT) { 732 if_printf(&sc->arpcom.ac_if, "PHY write timed out " 733 "(phy %d, reg %d, val %d)\n", phy, reg, val); 734 } 735 736 /* Restore the autopoll bit if necessary. 
*/ 737 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 738 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 739 DELAY(80); 740 } 741 742 return 0; 743 } 744 745 static void 746 bge_miibus_statchg(device_t dev) 747 { 748 struct bge_softc *sc; 749 struct mii_data *mii; 750 751 sc = device_get_softc(dev); 752 mii = device_get_softc(sc->bge_miibus); 753 754 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 755 (IFM_ACTIVE | IFM_AVALID)) { 756 switch (IFM_SUBTYPE(mii->mii_media_active)) { 757 case IFM_10_T: 758 case IFM_100_TX: 759 sc->bge_link = 1; 760 break; 761 case IFM_1000_T: 762 case IFM_1000_SX: 763 case IFM_2500_SX: 764 if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 765 sc->bge_link = 1; 766 else 767 sc->bge_link = 0; 768 break; 769 default: 770 sc->bge_link = 0; 771 break; 772 } 773 } else { 774 sc->bge_link = 0; 775 } 776 if (sc->bge_link == 0) 777 return; 778 779 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 780 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 781 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 782 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 783 } else { 784 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 785 } 786 787 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 788 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 789 } else { 790 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 791 } 792 } 793 794 /* 795 * Memory management for jumbo frames. 796 */ 797 static int 798 bge_alloc_jumbo_mem(struct bge_softc *sc) 799 { 800 struct ifnet *ifp = &sc->arpcom.ac_if; 801 struct bge_jslot *entry; 802 uint8_t *ptr; 803 bus_addr_t paddr; 804 int i, error; 805 806 /* 807 * Create tag for jumbo mbufs. 808 * This is really a bit of a kludge. We allocate a special 809 * jumbo buffer pool which (thanks to the way our DMA 810 * memory allocation works) will consist of contiguous 811 * pages. This means that even though a jumbo buffer might 812 * be larger than a page size, we don't really need to 813 * map it into more than one DMA segment. However, the 814 * default mbuf tag will result in multi-segment mappings, 815 * so we have to create a special jumbo mbuf tag that 816 * lets us get away with mapping the jumbo buffers as 817 * a single segment. I think eventually the driver should 818 * be changed so that it uses ordinary mbufs and cluster 819 * buffers, i.e. jumbo frames can span multiple DMA 820 * descriptors. But that's a project for another day. 821 */ 822 823 /* 824 * Create DMA stuffs for jumbo RX ring. 825 */ 826 error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 827 &sc->bge_cdata.bge_rx_jumbo_ring_tag, 828 &sc->bge_cdata.bge_rx_jumbo_ring_map, 829 (void *)&sc->bge_ldata.bge_rx_jumbo_ring, 830 &sc->bge_ldata.bge_rx_jumbo_ring_paddr); 831 if (error) { 832 if_printf(ifp, "could not create jumbo RX ring\n"); 833 return error; 834 } 835 836 /* 837 * Create DMA stuffs for jumbo buffer block. 838 */ 839 error = bge_dma_block_alloc(sc, BGE_JMEM, 840 &sc->bge_cdata.bge_jumbo_tag, 841 &sc->bge_cdata.bge_jumbo_map, 842 (void **)&sc->bge_ldata.bge_jumbo_buf, 843 &paddr); 844 if (error) { 845 if_printf(ifp, "could not create jumbo buffer\n"); 846 return error; 847 } 848 849 SLIST_INIT(&sc->bge_jfree_listhead); 850 851 /* 852 * Now divide it up into 9K pieces and save the addresses 853 * in an array. Note that we play an evil trick here by using 854 * the first few bytes in the buffer to hold the the address 855 * of the softc structure for this interface. 
This is because 856 * bge_jfree() needs it, but it is called by the mbuf management 857 * code which will not pass it to us explicitly. 858 */ 859 for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) { 860 entry = &sc->bge_cdata.bge_jslots[i]; 861 entry->bge_sc = sc; 862 entry->bge_buf = ptr; 863 entry->bge_paddr = paddr; 864 entry->bge_inuse = 0; 865 entry->bge_slot = i; 866 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link); 867 868 ptr += BGE_JLEN; 869 paddr += BGE_JLEN; 870 } 871 return 0; 872 } 873 874 static void 875 bge_free_jumbo_mem(struct bge_softc *sc) 876 { 877 /* Destroy jumbo RX ring. */ 878 bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 879 sc->bge_cdata.bge_rx_jumbo_ring_map, 880 sc->bge_ldata.bge_rx_jumbo_ring); 881 882 /* Destroy jumbo buffer block. */ 883 bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag, 884 sc->bge_cdata.bge_jumbo_map, 885 sc->bge_ldata.bge_jumbo_buf); 886 } 887 888 /* 889 * Allocate a jumbo buffer. 890 */ 891 static struct bge_jslot * 892 bge_jalloc(struct bge_softc *sc) 893 { 894 struct bge_jslot *entry; 895 896 lwkt_serialize_enter(&sc->bge_jslot_serializer); 897 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 898 if (entry) { 899 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link); 900 entry->bge_inuse = 1; 901 } else { 902 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 903 } 904 lwkt_serialize_exit(&sc->bge_jslot_serializer); 905 return(entry); 906 } 907 908 /* 909 * Adjust usage count on a jumbo buffer. 910 */ 911 static void 912 bge_jref(void *arg) 913 { 914 struct bge_jslot *entry = (struct bge_jslot *)arg; 915 struct bge_softc *sc = entry->bge_sc; 916 917 if (sc == NULL) 918 panic("bge_jref: can't find softc pointer!"); 919 920 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { 921 panic("bge_jref: asked to reference buffer " 922 "that we don't manage!"); 923 } else if (entry->bge_inuse == 0) { 924 panic("bge_jref: buffer already free!"); 925 } else { 926 atomic_add_int(&entry->bge_inuse, 1); 927 } 928 } 929 930 /* 931 * Release a jumbo buffer. 932 */ 933 static void 934 bge_jfree(void *arg) 935 { 936 struct bge_jslot *entry = (struct bge_jslot *)arg; 937 struct bge_softc *sc = entry->bge_sc; 938 939 if (sc == NULL) 940 panic("bge_jfree: can't find softc pointer!"); 941 942 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { 943 panic("bge_jfree: asked to free buffer that we don't manage!"); 944 } else if (entry->bge_inuse == 0) { 945 panic("bge_jfree: buffer already free!"); 946 } else { 947 /* 948 * Possible MP race to 0, use the serializer. The atomic insn 949 * is still needed for races against bge_jref(). 950 */ 951 lwkt_serialize_enter(&sc->bge_jslot_serializer); 952 atomic_subtract_int(&entry->bge_inuse, 1); 953 if (entry->bge_inuse == 0) { 954 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 955 entry, jslot_link); 956 } 957 lwkt_serialize_exit(&sc->bge_jslot_serializer); 958 } 959 } 960 961 962 /* 963 * Intialize a standard receive ring descriptor. 964 */ 965 static int 966 bge_newbuf_std(struct bge_softc *sc, int i, int init) 967 { 968 struct mbuf *m_new = NULL; 969 bus_dma_segment_t seg; 970 bus_dmamap_t map; 971 int error, nsegs; 972 973 m_new = m_getcl(init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 974 if (m_new == NULL) 975 return ENOBUFS; 976 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 977 978 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 979 m_adj(m_new, ETHER_ALIGN); 980 981 error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag, 982 sc->bge_cdata.bge_rx_tmpmap, m_new, 983 &seg, 1, &nsegs, BUS_DMA_NOWAIT); 984 if (error) { 985 m_freem(m_new); 986 return error; 987 } 988 989 if (!init) { 990 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 991 sc->bge_cdata.bge_rx_std_dmamap[i], 992 BUS_DMASYNC_POSTREAD); 993 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 994 sc->bge_cdata.bge_rx_std_dmamap[i]); 995 } 996 997 map = sc->bge_cdata.bge_rx_tmpmap; 998 sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i]; 999 sc->bge_cdata.bge_rx_std_dmamap[i] = map; 1000 1001 sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new; 1002 sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr; 1003 1004 bge_setup_rxdesc_std(sc, i); 1005 return 0; 1006 } 1007 1008 static void 1009 bge_setup_rxdesc_std(struct bge_softc *sc, int i) 1010 { 1011 struct bge_rxchain *rc; 1012 struct bge_rx_bd *r; 1013 1014 rc = &sc->bge_cdata.bge_rx_std_chain[i]; 1015 r = &sc->bge_ldata.bge_rx_std_ring[i]; 1016 1017 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr); 1018 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr); 1019 r->bge_len = rc->bge_mbuf->m_len; 1020 r->bge_idx = i; 1021 r->bge_flags = BGE_RXBDFLAG_END; 1022 } 1023 1024 /* 1025 * Initialize a jumbo receive ring descriptor. This allocates 1026 * a jumbo buffer from the pool managed internally by the driver. 1027 */ 1028 static int 1029 bge_newbuf_jumbo(struct bge_softc *sc, int i, int init) 1030 { 1031 struct mbuf *m_new = NULL; 1032 struct bge_jslot *buf; 1033 bus_addr_t paddr; 1034 1035 /* Allocate the mbuf. */ 1036 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA); 1037 if (m_new == NULL) 1038 return ENOBUFS; 1039 1040 /* Allocate the jumbo buffer */ 1041 buf = bge_jalloc(sc); 1042 if (buf == NULL) { 1043 m_freem(m_new); 1044 return ENOBUFS; 1045 } 1046 1047 /* Attach the buffer to the mbuf. */ 1048 m_new->m_ext.ext_arg = buf; 1049 m_new->m_ext.ext_buf = buf->bge_buf; 1050 m_new->m_ext.ext_free = bge_jfree; 1051 m_new->m_ext.ext_ref = bge_jref; 1052 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 1053 1054 m_new->m_flags |= M_EXT; 1055 1056 m_new->m_data = m_new->m_ext.ext_buf; 1057 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 1058 1059 paddr = buf->bge_paddr; 1060 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) { 1061 m_adj(m_new, ETHER_ALIGN); 1062 paddr += ETHER_ALIGN; 1063 } 1064 1065 /* Save necessary information */ 1066 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new; 1067 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr; 1068 1069 /* Set up the descriptor. 
*/ 1070 bge_setup_rxdesc_jumbo(sc, i); 1071 return 0; 1072 } 1073 1074 static void 1075 bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i) 1076 { 1077 struct bge_rx_bd *r; 1078 struct bge_rxchain *rc; 1079 1080 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 1081 rc = &sc->bge_cdata.bge_rx_jumbo_chain[i]; 1082 1083 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr); 1084 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr); 1085 r->bge_len = rc->bge_mbuf->m_len; 1086 r->bge_idx = i; 1087 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 1088 } 1089 1090 static int 1091 bge_init_rx_ring_std(struct bge_softc *sc) 1092 { 1093 int i, error; 1094 1095 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1096 error = bge_newbuf_std(sc, i, 1); 1097 if (error) 1098 return error; 1099 }; 1100 1101 sc->bge_std = BGE_STD_RX_RING_CNT - 1; 1102 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1103 1104 return(0); 1105 } 1106 1107 static void 1108 bge_free_rx_ring_std(struct bge_softc *sc) 1109 { 1110 int i; 1111 1112 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1113 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i]; 1114 1115 if (rc->bge_mbuf != NULL) { 1116 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1117 sc->bge_cdata.bge_rx_std_dmamap[i]); 1118 m_freem(rc->bge_mbuf); 1119 rc->bge_mbuf = NULL; 1120 } 1121 bzero(&sc->bge_ldata.bge_rx_std_ring[i], 1122 sizeof(struct bge_rx_bd)); 1123 } 1124 } 1125 1126 static int 1127 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1128 { 1129 struct bge_rcb *rcb; 1130 int i, error; 1131 1132 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1133 error = bge_newbuf_jumbo(sc, i, 1); 1134 if (error) 1135 return error; 1136 }; 1137 1138 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 1139 1140 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1141 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 1142 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1143 1144 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1145 1146 return(0); 1147 } 1148 1149 static void 1150 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1151 { 1152 int i; 1153 1154 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1155 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i]; 1156 1157 if (rc->bge_mbuf != NULL) { 1158 m_freem(rc->bge_mbuf); 1159 rc->bge_mbuf = NULL; 1160 } 1161 bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i], 1162 sizeof(struct bge_rx_bd)); 1163 } 1164 } 1165 1166 static void 1167 bge_free_tx_ring(struct bge_softc *sc) 1168 { 1169 int i; 1170 1171 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1172 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1173 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 1174 sc->bge_cdata.bge_tx_dmamap[i]); 1175 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1176 sc->bge_cdata.bge_tx_chain[i] = NULL; 1177 } 1178 bzero(&sc->bge_ldata.bge_tx_ring[i], 1179 sizeof(struct bge_tx_bd)); 1180 } 1181 } 1182 1183 static int 1184 bge_init_tx_ring(struct bge_softc *sc) 1185 { 1186 sc->bge_txcnt = 0; 1187 sc->bge_tx_saved_considx = 0; 1188 sc->bge_tx_prodidx = 0; 1189 1190 /* Initialize transmit producer index for host-memory send ring. 
*/ 1191 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1192 1193 /* 5700 b2 errata */ 1194 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1195 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1196 1197 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1198 /* 5700 b2 errata */ 1199 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1200 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1201 1202 return(0); 1203 } 1204 1205 static void 1206 bge_setmulti(struct bge_softc *sc) 1207 { 1208 struct ifnet *ifp; 1209 struct ifmultiaddr *ifma; 1210 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1211 int h, i; 1212 1213 ifp = &sc->arpcom.ac_if; 1214 1215 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1216 for (i = 0; i < 4; i++) 1217 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1218 return; 1219 } 1220 1221 /* First, zot all the existing filters. */ 1222 for (i = 0; i < 4; i++) 1223 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1224 1225 /* Now program new ones. */ 1226 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1227 if (ifma->ifma_addr->sa_family != AF_LINK) 1228 continue; 1229 h = ether_crc32_le( 1230 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1231 ETHER_ADDR_LEN) & 0x7f; 1232 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1233 } 1234 1235 for (i = 0; i < 4; i++) 1236 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1237 } 1238 1239 /* 1240 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1241 * self-test results. 1242 */ 1243 static int 1244 bge_chipinit(struct bge_softc *sc) 1245 { 1246 int i; 1247 uint32_t dma_rw_ctl; 1248 uint16_t val; 1249 1250 /* Set endian type before we access any non-PCI registers. */ 1251 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 1252 BGE_INIT | sc->bge_pci_miscctl, 4); 1253 1254 /* Clear the MAC control register */ 1255 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1256 1257 /* 1258 * Clear the MAC statistics block in the NIC's 1259 * internal memory. 1260 */ 1261 for (i = BGE_STATS_BLOCK; 1262 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1263 BGE_MEMWIN_WRITE(sc, i, 0); 1264 1265 for (i = BGE_STATUS_BLOCK; 1266 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1267 BGE_MEMWIN_WRITE(sc, i, 0); 1268 1269 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { 1270 /* 1271 * Fix data corruption caused by non-qword write with WB. 1272 * Fix master abort in PCI mode. 1273 * Fix PCI latency timer. 1274 */ 1275 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); 1276 val |= (1 << 10) | (1 << 12) | (1 << 13); 1277 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); 1278 } 1279 1280 /* Set up the PCI DMA control register. 
*/ 1281 if (sc->bge_flags & BGE_FLAG_PCIE) { 1282 /* PCI Express */ 1283 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1284 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1285 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1286 } else if (sc->bge_flags & BGE_FLAG_PCIX) { 1287 /* PCI-X bus */ 1288 if (BGE_IS_5714_FAMILY(sc)) { 1289 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD; 1290 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */ 1291 /* XXX magic values, Broadcom-supplied Linux driver */ 1292 if (sc->bge_asicrev == BGE_ASICREV_BCM5780) { 1293 dma_rw_ctl |= (1 << 20) | (1 << 18) | 1294 BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1295 } else { 1296 dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15); 1297 } 1298 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { 1299 /* 1300 * In the BCM5703, the DMA read watermark should 1301 * be set to less than or equal to the maximum 1302 * memory read byte count of the PCI-X command 1303 * register. 1304 */ 1305 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1306 (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1307 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1308 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1309 /* 1310 * The 5704 uses a different encoding of read/write 1311 * watermarks. 1312 */ 1313 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1314 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1315 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1316 } else { 1317 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1318 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1319 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1320 (0x0F); 1321 } 1322 1323 /* 1324 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround 1325 * for hardware bugs. 1326 */ 1327 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1328 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1329 uint32_t tmp; 1330 1331 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1332 if (tmp == 0x6 || tmp == 0x7) 1333 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; 1334 } 1335 } else { 1336 /* Conventional PCI bus */ 1337 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | 1338 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1339 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1340 (0x0F); 1341 } 1342 1343 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1344 sc->bge_asicrev == BGE_ASICREV_BCM5704 || 1345 sc->bge_asicrev == BGE_ASICREV_BCM5705) 1346 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1347 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1348 1349 /* 1350 * Set up general mode register. 1351 */ 1352 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 1353 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1354 BGE_MODECTL_TX_NO_PHDR_CSUM); 1355 1356 /* 1357 * BCM5701 B5 have a bug causing data corruption when using 1358 * 64-bit DMA reads, which can be terminated early and then 1359 * completed later as 32-bit accesses, in combination with 1360 * certain bridges. 1361 */ 1362 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 1363 sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 1364 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32); 1365 1366 /* 1367 * Disable memory write invalidate. Apparently it is not supported 1368 * properly by these devices. Also ensure that INTx isn't disabled, 1369 * as these chips need it even when using MSI. 
1370 */ 1371 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1372 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4); 1373 1374 /* Set the timer prescaler (always 66Mhz) */ 1375 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1376 1377 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 1378 DELAY(40); /* XXX */ 1379 1380 /* Put PHY into ready state */ 1381 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 1382 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 1383 DELAY(40); 1384 } 1385 1386 return(0); 1387 } 1388 1389 static int 1390 bge_blockinit(struct bge_softc *sc) 1391 { 1392 struct bge_rcb *rcb; 1393 bus_size_t vrcb; 1394 bge_hostaddr taddr; 1395 uint32_t val; 1396 int i, limit; 1397 1398 /* 1399 * Initialize the memory window pointer register so that 1400 * we can access the first 32K of internal NIC RAM. This will 1401 * allow us to set up the TX send ring RCBs and the RX return 1402 * ring RCBs, plus other things which live in NIC memory. 1403 */ 1404 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1405 1406 /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1407 1408 if (!BGE_IS_5705_PLUS(sc)) { 1409 /* Configure mbuf memory pool */ 1410 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1411 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1412 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1413 else 1414 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1415 1416 /* Configure DMA resource pool */ 1417 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1418 BGE_DMA_DESCRIPTORS); 1419 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1420 } 1421 1422 /* Configure mbuf pool watermarks */ 1423 if (!BGE_IS_5705_PLUS(sc)) { 1424 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1425 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1426 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1427 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 1428 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1429 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1430 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1431 } else { 1432 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1433 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1434 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1435 } 1436 1437 /* Configure DMA resource watermarks */ 1438 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1439 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1440 1441 /* Enable buffer manager */ 1442 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1443 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1444 1445 /* Poll for buffer manager start indication */ 1446 for (i = 0; i < BGE_TIMEOUT; i++) { 1447 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1448 break; 1449 DELAY(10); 1450 } 1451 1452 if (i == BGE_TIMEOUT) { 1453 if_printf(&sc->arpcom.ac_if, 1454 "buffer manager failed to start\n"); 1455 return(ENXIO); 1456 } 1457 1458 /* Enable flow-through queues */ 1459 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1460 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1461 1462 /* Wait until queue initialization is complete */ 1463 for (i = 0; i < BGE_TIMEOUT; i++) { 1464 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1465 break; 1466 DELAY(10); 1467 } 1468 1469 if (i == BGE_TIMEOUT) { 1470 if_printf(&sc->arpcom.ac_if, 1471 "flow-through queue init failed\n"); 1472 return(ENXIO); 1473 } 1474 1475 /* 1476 * Summary of rings supported by the controller: 1477 * 1478 * Standard Receive Producer Ring 1479 * - This ring is used to feed receive buffers for "standard" 1480 * sized frames (typically 1536 bytes) to the 
controller. 1481 * 1482 * Jumbo Receive Producer Ring 1483 * - This ring is used to feed receive buffers for jumbo sized 1484 * frames (i.e. anything bigger than the "standard" frames) 1485 * to the controller. 1486 * 1487 * Mini Receive Producer Ring 1488 * - This ring is used to feed receive buffers for "mini" 1489 * sized frames to the controller. 1490 * - This feature required external memory for the controller 1491 * but was never used in a production system. Should always 1492 * be disabled. 1493 * 1494 * Receive Return Ring 1495 * - After the controller has placed an incoming frame into a 1496 * receive buffer that buffer is moved into a receive return 1497 * ring. The driver is then responsible to passing the 1498 * buffer up to the stack. Many versions of the controller 1499 * support multiple RR rings. 1500 * 1501 * Send Ring 1502 * - This ring is used for outgoing frames. Many versions of 1503 * the controller support multiple send rings. 1504 */ 1505 1506 /* Initialize the standard receive producer ring control block. */ 1507 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1508 rcb->bge_hostaddr.bge_addr_lo = 1509 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1510 rcb->bge_hostaddr.bge_addr_hi = 1511 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1512 if (BGE_IS_5705_PLUS(sc)) { 1513 /* 1514 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 1515 * Bits 15-2 : Reserved (should be 0) 1516 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1517 * Bit 0 : Reserved 1518 */ 1519 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1520 } else { 1521 /* 1522 * Ring size is always XXX entries 1523 * Bits 31-16: Maximum RX frame size 1524 * Bits 15-2 : Reserved (should be 0) 1525 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1526 * Bit 0 : Reserved 1527 */ 1528 rcb->bge_maxlen_flags = 1529 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1530 } 1531 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1532 /* Write the standard receive producer ring control block. */ 1533 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1534 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1535 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1536 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1537 /* Reset the standard receive producer ring producer index. */ 1538 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1539 1540 /* 1541 * Initialize the jumbo RX producer ring control 1542 * block. We set the 'ring disabled' bit in the 1543 * flags field until we're actually ready to start 1544 * using this ring (i.e. once we set the MTU 1545 * high enough to require it). 1546 */ 1547 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1548 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1549 /* Get the jumbo receive producer ring RCB parameters. */ 1550 rcb->bge_hostaddr.bge_addr_lo = 1551 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1552 rcb->bge_hostaddr.bge_addr_hi = 1553 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1554 rcb->bge_maxlen_flags = 1555 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1556 BGE_RCB_FLAG_RING_DISABLED); 1557 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1558 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1559 rcb->bge_hostaddr.bge_addr_hi); 1560 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1561 rcb->bge_hostaddr.bge_addr_lo); 1562 /* Program the jumbo receive producer ring RCB parameters. 
 */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Disable the mini receive producer ring RCB. */
	if (BGE_IS_5700_FAMILY(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		/* Reset the mini receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
	     sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
	     sc->bge_chipid == BGE_CHIPID_BCM5906_A2)) {
		CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
		    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}

	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
		    BGE_JUMBO_RX_RING_CNT/8);
	}

	/*
	 * Disable all send rings by setting the 'ring disabled' bit
	 * in the flags field of all the TX send ring control blocks,
	 * located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc)) {
		/* 5700 to 5704 had 16 send rings. */
		limit = BGE_TX_RINGS_EXTSSRAM_MAX;
	} else {
		limit = 1;
	}
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < limit; i++) {
		RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
		vrcb += sizeof(struct bge_rcb);
	}

	/* Configure send ring RCB 0 (we use only the first ring) */
	vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, vrcb, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));

	/*
	 * Disable all receive return rings by setting the
	 * 'ring disabled' bit in the flags field of all the receive
	 * return ring control blocks, located in NIC memory.
	 */
	if (!BGE_IS_5705_PLUS(sc))
		limit = BGE_RX_RINGS_MAX;
	else if (sc->bge_asicrev == BGE_ASICREV_BCM5755)
		limit = 4;
	else
		limit = 1;
	/* Disable all receive return rings.
*/ 1651 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1652 for (i = 0; i < limit; i++) { 1653 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1654 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1655 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1656 BGE_RCB_FLAG_RING_DISABLED); 1657 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1658 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 1659 (i * (sizeof(uint64_t))), 0); 1660 vrcb += sizeof(struct bge_rcb); 1661 } 1662 1663 /* 1664 * Set up receive return ring 0. Note that the NIC address 1665 * for RX return rings is 0x0. The return rings live entirely 1666 * within the host, so the nicaddr field in the RCB isn't used. 1667 */ 1668 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1669 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1670 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1671 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1672 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1673 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1674 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1675 1676 /* Set random backoff seed for TX */ 1677 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1678 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1679 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1680 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1681 BGE_TX_BACKOFF_SEED_MASK); 1682 1683 /* Set inter-packet gap */ 1684 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1685 1686 /* 1687 * Specify which ring to use for packets that don't match 1688 * any RX rules. 1689 */ 1690 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1691 1692 /* 1693 * Configure number of RX lists. One interrupt distribution 1694 * list, sixteen active lists, one bad frames class. 1695 */ 1696 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1697 1698 /* Inialize RX list placement stats mask. */ 1699 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1700 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1701 1702 /* Disable host coalescing until we get it set up */ 1703 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1704 1705 /* Poll to make sure it's shut down. */ 1706 for (i = 0; i < BGE_TIMEOUT; i++) { 1707 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1708 break; 1709 DELAY(10); 1710 } 1711 1712 if (i == BGE_TIMEOUT) { 1713 if_printf(&sc->arpcom.ac_if, 1714 "host coalescing engine failed to idle\n"); 1715 return(ENXIO); 1716 } 1717 1718 /* Set up host coalescing defaults */ 1719 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1720 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1721 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_coal_bds); 1722 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_coal_bds); 1723 if (!BGE_IS_5705_PLUS(sc)) { 1724 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 1725 sc->bge_rx_coal_ticks_int); 1726 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 1727 sc->bge_tx_coal_ticks_int); 1728 } 1729 /* 1730 * NOTE: 1731 * The datasheet (57XX-PG105-R) says BCM5705+ do not 1732 * have following two registers; obviously it is wrong. 
1733 */ 1734 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bge_rx_coal_bds_int); 1735 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bge_tx_coal_bds_int); 1736 1737 /* Set up address of statistics block */ 1738 if (!BGE_IS_5705_PLUS(sc)) { 1739 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1740 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1741 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1742 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1743 1744 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1745 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1746 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1747 } 1748 1749 /* Set up address of status block */ 1750 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 1751 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1752 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1753 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1754 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1755 1756 /* 1757 * Set up status block partial update size. 1758 * 1759 * Because only a single TX ring, RX producer ring and RX return ring 1760 * are used, ask the device to update only the minimum part of the status 1761 * block, except for BCM5700 AX/BX, whose status block partial update 1762 * size can't be configured. 1763 */ 1764 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1765 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 1766 /* XXX Actually reserved on BCM5700 AX/BX */ 1767 val = BGE_STATBLKSZ_FULL; 1768 } else { 1769 val = BGE_STATBLKSZ_32BYTE; 1770 } 1771 #if 0 1772 /* 1773 * Does not seem to have a visible effect in either 1774 * bulk data (1472B UDP datagram) or tiny data 1775 * (18B UDP datagram) TX tests. 1776 */ 1777 if (!BGE_IS_CRIPPLED(sc)) 1778 val |= BGE_HCCMODE_CLRTICK_TX; 1779 #endif 1780 1781 /* Turn on host coalescing state machine */ 1782 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 1783 1784 /* Turn on RX BD completion state machine and enable attentions */ 1785 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1786 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1787 1788 /* Turn on RX list placement state machine */ 1789 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1790 1791 /* Turn on RX list selector state machine. */ 1792 if (!BGE_IS_5705_PLUS(sc)) 1793 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1794 1795 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 1796 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 1797 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 1798 BGE_MACMODE_FRMHDR_DMA_ENB; 1799 1800 if (sc->bge_flags & BGE_FLAG_TBI) 1801 val |= BGE_PORTMODE_TBI; 1802 else if (sc->bge_flags & BGE_FLAG_MII_SERDES) 1803 val |= BGE_PORTMODE_GMII; 1804 else 1805 val |= BGE_PORTMODE_MII; 1806 1807 /* Turn on DMA, clear stats */ 1808 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 1809 1810 /* Set misc. local control, enable interrupts on attentions */ 1811 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1812 1813 #ifdef notdef 1814 /* Assert GPIO pins for PHY reset */ 1815 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1816 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1817 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1818 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1819 #endif 1820 1821 /* Turn on DMA completion state machine */ 1822 if (!BGE_IS_5705_PLUS(sc)) 1823 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1824 1825 /* Turn on write DMA state machine */ 1826 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1827 if (BGE_IS_5755_PLUS(sc)) { 1828 /* Enable host coalescing bug fix.
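 * (BGE_WDMAMODE_STATUS_TAG_FIX: judging by the flag name, this keeps the
 * status block tag written by the write DMA engine consistent with the
 * data it has already posted; the datasheet reference is not at hand, so
 * treat this description as an inference.)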
*/ 1829 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 1830 } 1831 if (sc->bge_asicrev == BGE_ASICREV_BCM5785) { 1832 /* Request larger DMA burst size to get better performance. */ 1833 val |= BGE_WDMAMODE_BURST_ALL_DATA; 1834 } 1835 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 1836 DELAY(40); 1837 1838 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || 1839 sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1840 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1841 sc->bge_asicrev == BGE_ASICREV_BCM57780) { 1842 /* 1843 * Enable fix for read DMA FIFO overruns. 1844 * The fix is to limit the number of RX BDs 1845 * the hardware would fetch at a fime. 1846 */ 1847 val = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL); 1848 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, 1849 val| BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 1850 } 1851 1852 /* Turn on read DMA state machine */ 1853 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1854 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1855 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1856 sc->bge_asicrev == BGE_ASICREV_BCM57780) 1857 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1858 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1859 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 1860 if (sc->bge_flags & BGE_FLAG_PCIE) 1861 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1862 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 1863 DELAY(40); 1864 1865 /* Turn on RX data completion state machine */ 1866 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1867 1868 /* Turn on RX BD initiator state machine */ 1869 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1870 1871 /* Turn on RX data and RX BD initiator state machine */ 1872 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1873 1874 /* Turn on Mbuf cluster free state machine */ 1875 if (!BGE_IS_5705_PLUS(sc)) 1876 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1877 1878 /* Turn on send BD completion state machine */ 1879 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1880 1881 /* Turn on send data completion state machine */ 1882 val = BGE_SDCMODE_ENABLE; 1883 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 1884 val |= BGE_SDCMODE_CDELAY; 1885 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 1886 1887 /* Turn on send data initiator state machine */ 1888 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1889 1890 /* Turn on send BD initiator state machine */ 1891 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1892 1893 /* Turn on send BD selector state machine */ 1894 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1895 1896 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1897 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1898 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1899 1900 /* ack/clear link change events */ 1901 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1902 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1903 BGE_MACSTAT_LINK_CHANGED); 1904 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1905 1906 /* 1907 * Enable attention when the link has changed state for 1908 * devices that use auto polling. 1909 */ 1910 if (sc->bge_flags & BGE_FLAG_TBI) { 1911 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1912 } else { 1913 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 1914 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 1915 DELAY(80); 1916 } 1917 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1918 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 1919 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1920 BGE_EVTENB_MI_INTERRUPT); 1921 } 1922 } 1923 1924 /* 1925 * Clear any pending link state attention. 
1926 * Otherwise some link state change events may be lost until attention 1927 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence. 1928 * It's not necessary on newer BCM chips - perhaps enabling link 1929 * state change attentions implies clearing pending attention. 1930 */ 1931 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1932 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1933 BGE_MACSTAT_LINK_CHANGED); 1934 1935 /* Enable link state change attentions. */ 1936 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1937 1938 return(0); 1939 } 1940 1941 /* 1942 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1943 * against our list and return its name if we find a match. Note 1944 * that since the Broadcom controller contains VPD support, we 1945 * can get the device name string from the controller itself instead 1946 * of the compiled-in string. This is a little slow, but it guarantees 1947 * we'll always announce the right product name. 1948 */ 1949 static int 1950 bge_probe(device_t dev) 1951 { 1952 const struct bge_type *t; 1953 uint16_t product, vendor; 1954 1955 product = pci_get_device(dev); 1956 vendor = pci_get_vendor(dev); 1957 1958 for (t = bge_devs; t->bge_name != NULL; t++) { 1959 if (vendor == t->bge_vid && product == t->bge_did) 1960 break; 1961 } 1962 if (t->bge_name == NULL) 1963 return(ENXIO); 1964 1965 device_set_desc(dev, t->bge_name); 1966 return(0); 1967 } 1968 1969 static int 1970 bge_attach(device_t dev) 1971 { 1972 struct ifnet *ifp; 1973 struct bge_softc *sc; 1974 uint32_t hwcfg = 0, misccfg; 1975 int error = 0, rid, capmask; 1976 uint8_t ether_addr[ETHER_ADDR_LEN]; 1977 uint16_t product, vendor; 1978 driver_intr_t *intr_func; 1979 uintptr_t mii_priv = 0; 1980 u_int intr_flags; 1981 int msi_enable; 1982 1983 sc = device_get_softc(dev); 1984 sc->bge_dev = dev; 1985 callout_init(&sc->bge_stat_timer); 1986 lwkt_serialize_init(&sc->bge_jslot_serializer); 1987 1988 #ifndef BURN_BRIDGES 1989 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1990 uint32_t irq, mem; 1991 1992 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1993 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 1994 1995 device_printf(dev, "chip is in D%d power mode " 1996 "-- setting to D0\n", pci_get_powerstate(dev)); 1997 1998 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1999 2000 pci_write_config(dev, PCIR_INTLINE, irq, 4); 2001 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 2002 } 2003 #endif /* !BURN_BRIDGE */ 2004 2005 /* 2006 * Map control/status registers. 2007 */ 2008 pci_enable_busmaster(dev); 2009 2010 rid = BGE_PCI_BAR0; 2011 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2012 RF_ACTIVE); 2013 2014 if (sc->bge_res == NULL) { 2015 device_printf(dev, "couldn't map memory\n"); 2016 return ENXIO; 2017 } 2018 2019 sc->bge_btag = rman_get_bustag(sc->bge_res); 2020 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2021 2022 /* Save various chip information */ 2023 sc->bge_chipid = 2024 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2025 BGE_PCIMISCCTL_ASICREV_SHIFT; 2026 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) 2027 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); 2028 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2029 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2030 2031 /* Save chipset family. 
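 *
 * The family flags are cumulative: the 5755-class chips get 5755_PLUS,
 * 575X_PLUS and 5705_PLUS in one shot, while the 5714 family and the
 * 575X chips pick up the broader flags through the deliberate
 * fall-throughs in the switch below.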
*/ 2032 switch (sc->bge_asicrev) { 2033 case BGE_ASICREV_BCM5755: 2034 case BGE_ASICREV_BCM5761: 2035 case BGE_ASICREV_BCM5784: 2036 case BGE_ASICREV_BCM5785: 2037 case BGE_ASICREV_BCM5787: 2038 case BGE_ASICREV_BCM57780: 2039 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | 2040 BGE_FLAG_5705_PLUS; 2041 break; 2042 2043 case BGE_ASICREV_BCM5700: 2044 case BGE_ASICREV_BCM5701: 2045 case BGE_ASICREV_BCM5703: 2046 case BGE_ASICREV_BCM5704: 2047 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; 2048 break; 2049 2050 case BGE_ASICREV_BCM5714_A0: 2051 case BGE_ASICREV_BCM5780: 2052 case BGE_ASICREV_BCM5714: 2053 sc->bge_flags |= BGE_FLAG_5714_FAMILY; 2054 /* Fall through */ 2055 2056 case BGE_ASICREV_BCM5750: 2057 case BGE_ASICREV_BCM5752: 2058 case BGE_ASICREV_BCM5906: 2059 sc->bge_flags |= BGE_FLAG_575X_PLUS; 2060 /* Fall through */ 2061 2062 case BGE_ASICREV_BCM5705: 2063 sc->bge_flags |= BGE_FLAG_5705_PLUS; 2064 break; 2065 } 2066 2067 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 2068 sc->bge_flags |= BGE_FLAG_NO_EEPROM; 2069 2070 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; 2071 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 2072 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 2073 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 2074 sc->bge_flags |= BGE_FLAG_5788; 2075 2076 /* BCM5755 or higher and BCM5906 have short DMA bug. */ 2077 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) 2078 sc->bge_flags |= BGE_FLAG_SHORTDMA; 2079 2080 /* 2081 * Check if this is a PCI-X or PCI Express device. 2082 */ 2083 if (BGE_IS_5705_PLUS(sc)) { 2084 if (pci_is_pcie(dev)) { 2085 sc->bge_flags |= BGE_FLAG_PCIE; 2086 sc->bge_pciecap = pci_get_pciecap_ptr(sc->bge_dev); 2087 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 2088 } 2089 } else { 2090 /* 2091 * Check if the device is in PCI-X Mode. 2092 * (This bit is not valid on PCI Express controllers.) 2093 */ 2094 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 2095 BGE_PCISTATE_PCI_BUSMODE) == 0) { 2096 sc->bge_flags |= BGE_FLAG_PCIX; 2097 sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev); 2098 sc->bge_mbox_reorder = device_getenv_int(sc->bge_dev, 2099 "mbox_reorder", 0); 2100 } 2101 } 2102 device_printf(dev, "CHIP ID 0x%08x; " 2103 "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n", 2104 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev, 2105 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" 2106 : ((sc->bge_flags & BGE_FLAG_PCIE) ? 2107 "PCI-E" : "PCI")); 2108 2109 /* 2110 * The 40bit DMA bug applies to the 5714/5715 controllers and is 2111 * not actually a MAC controller bug but an issue with the embedded 2112 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 2113 */ 2114 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX)) 2115 sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT; 2116 2117 /* Identify the chips that use an CPMU. */ 2118 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 2119 sc->bge_asicrev == BGE_ASICREV_BCM5761 || 2120 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 2121 sc->bge_asicrev == BGE_ASICREV_BCM57780) 2122 sc->bge_flags |= BGE_FLAG_CPMU; 2123 2124 /* 2125 * When using the BCM5701 in PCI-X mode, data corruption has 2126 * been observed in the first few bytes of some received packets. 2127 * Aligning the packet buffer in memory eliminates the corruption. 2128 * Unfortunately, this misaligns the packet payloads. On platforms 2129 * which do not support unaligned accesses, we will realign the 2130 * payloads by copying the received packets. 
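 * (The copy itself is done in bge_rxeof(): on strict-alignment platforms
 * with BGE_FLAG_RX_ALIGNBUG set, the payload is shifted forward by
 * ETHER_ALIGN bytes with bcopy() before the mbuf is passed up.)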
2131 */ 2132 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2133 (sc->bge_flags & BGE_FLAG_PCIX)) 2134 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2135 2136 if (!BGE_IS_CRIPPLED(sc)) { 2137 if (device_getenv_int(dev, "status_tag", 1)) { 2138 sc->bge_flags |= BGE_FLAG_STATUS_TAG; 2139 sc->bge_pci_miscctl = BGE_PCIMISCCTL_TAGGED_STATUS; 2140 if (bootverbose) 2141 device_printf(dev, "enable status tag\n"); 2142 } 2143 } 2144 2145 /* 2146 * Set various PHY quirk flags. 2147 */ 2148 product = pci_get_device(dev); 2149 vendor = pci_get_vendor(dev); 2150 2151 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 || 2152 sc->bge_asicrev == BGE_ASICREV_BCM5701) && 2153 pci_get_subvendor(dev) == PCI_VENDOR_DELL) 2154 mii_priv |= BRGPHY_FLAG_NO_3LED; 2155 2156 capmask = MII_CAPMASK_DEFAULT; 2157 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 && 2158 (misccfg == 0x4000 || misccfg == 0x8000)) || 2159 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 2160 vendor == PCI_VENDOR_BROADCOM && 2161 (product == PCI_PRODUCT_BROADCOM_BCM5901 || 2162 product == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2163 product == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2164 (vendor == PCI_VENDOR_BROADCOM && 2165 (product == PCI_PRODUCT_BROADCOM_BCM5751F || 2166 product == PCI_PRODUCT_BROADCOM_BCM5753F || 2167 product == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2168 product == PCI_PRODUCT_BROADCOM_BCM57790 || 2169 sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2170 /* 10/100 only */ 2171 capmask &= ~BMSR_EXTSTAT; 2172 } 2173 2174 mii_priv |= BRGPHY_FLAG_WIRESPEED; 2175 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 2176 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 2177 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2178 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2179 sc->bge_asicrev == BGE_ASICREV_BCM5906) 2180 mii_priv &= ~BRGPHY_FLAG_WIRESPEED; 2181 2182 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2183 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2184 mii_priv |= BRGPHY_FLAG_CRC_BUG; 2185 2186 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || 2187 sc->bge_chiprev == BGE_CHIPREV_5704_AX) 2188 mii_priv |= BRGPHY_FLAG_ADC_BUG; 2189 2190 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2191 mii_priv |= BRGPHY_FLAG_5704_A0; 2192 2193 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 2194 mii_priv |= BRGPHY_FLAG_5906; 2195 2196 if (BGE_IS_5705_PLUS(sc) && 2197 sc->bge_asicrev != BGE_ASICREV_BCM5906 && 2198 /* sc->bge_asicrev != BGE_ASICREV_BCM5717 && */ 2199 sc->bge_asicrev != BGE_ASICREV_BCM5785 && 2200 /* sc->bge_asicrev != BGE_ASICREV_BCM57765 && */ 2201 sc->bge_asicrev != BGE_ASICREV_BCM57780) { 2202 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2203 sc->bge_asicrev == BGE_ASICREV_BCM5761 || 2204 sc->bge_asicrev == BGE_ASICREV_BCM5784 || 2205 sc->bge_asicrev == BGE_ASICREV_BCM5787) { 2206 if (product != PCI_PRODUCT_BROADCOM_BCM5722 && 2207 product != PCI_PRODUCT_BROADCOM_BCM5756) 2208 mii_priv |= BRGPHY_FLAG_JITTER_BUG; 2209 if (product == PCI_PRODUCT_BROADCOM_BCM5755M) 2210 mii_priv |= BRGPHY_FLAG_ADJUST_TRIM; 2211 } else { 2212 mii_priv |= BRGPHY_FLAG_BER_BUG; 2213 } 2214 } 2215 2216 /* 2217 * Allocate interrupt 2218 */ 2219 msi_enable = bge_msi_enable; 2220 if ((sc->bge_flags & BGE_FLAG_STATUS_TAG) == 0) { 2221 /* If "tagged status" is disabled, don't enable MSI */ 2222 msi_enable = 0; 2223 } else if (msi_enable) { 2224 msi_enable = 0; /* Disable by default */ 2225 if (BGE_IS_575X_PLUS(sc)) { 2226 msi_enable = 1; 2227 /* XXX we filter all 5714 chips */ 2228 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 2229 (sc->bge_asicrev == BGE_ASICREV_BCM5750 && 2230 (sc->bge_chiprev == 
BGE_CHIPREV_5750_AX || 2231 sc->bge_chiprev == BGE_CHIPREV_5750_BX))) 2232 msi_enable = 0; 2233 else if (BGE_IS_5755_PLUS(sc) || 2234 sc->bge_asicrev == BGE_ASICREV_BCM5906) 2235 sc->bge_flags |= BGE_FLAG_ONESHOT_MSI; 2236 } 2237 } 2238 if (msi_enable) { 2239 if (pci_find_extcap(dev, PCIY_MSI, &sc->bge_msicap)) { 2240 device_printf(dev, "no MSI capability\n"); 2241 msi_enable = 0; 2242 } 2243 } 2244 2245 sc->bge_irq_type = pci_alloc_1intr(dev, msi_enable, &sc->bge_irq_rid, 2246 &intr_flags); 2247 2248 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bge_irq_rid, 2249 intr_flags); 2250 if (sc->bge_irq == NULL) { 2251 device_printf(dev, "couldn't map interrupt\n"); 2252 error = ENXIO; 2253 goto fail; 2254 } 2255 2256 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) 2257 bge_enable_msi(sc); 2258 else 2259 sc->bge_flags &= ~BGE_FLAG_ONESHOT_MSI; 2260 2261 /* Initialize if_name earlier, so if_printf could be used */ 2262 ifp = &sc->arpcom.ac_if; 2263 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2264 2265 /* Try to reset the chip. */ 2266 bge_reset(sc); 2267 2268 if (bge_chipinit(sc)) { 2269 device_printf(dev, "chip initialization failed\n"); 2270 error = ENXIO; 2271 goto fail; 2272 } 2273 2274 /* 2275 * Get station address 2276 */ 2277 error = bge_get_eaddr(sc, ether_addr); 2278 if (error) { 2279 device_printf(dev, "failed to read station address\n"); 2280 goto fail; 2281 } 2282 2283 /* 5705/5750 limits RX return ring to 512 entries. */ 2284 if (BGE_IS_5705_PLUS(sc)) 2285 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2286 else 2287 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2288 2289 error = bge_dma_alloc(sc); 2290 if (error) 2291 goto fail; 2292 2293 /* Set default tuneable values. */ 2294 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2295 sc->bge_rx_coal_ticks = BGE_RX_COAL_TICKS_DEF; 2296 sc->bge_tx_coal_ticks = BGE_TX_COAL_TICKS_DEF; 2297 sc->bge_rx_coal_bds = BGE_RX_COAL_BDS_DEF; 2298 sc->bge_tx_coal_bds = BGE_TX_COAL_BDS_DEF; 2299 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 2300 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_DEF; 2301 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_DEF; 2302 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_DEF; 2303 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_DEF; 2304 } else { 2305 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_MIN; 2306 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_MIN; 2307 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_MIN; 2308 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_MIN; 2309 } 2310 2311 /* Set up ifnet structure */ 2312 ifp->if_softc = sc; 2313 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2314 ifp->if_ioctl = bge_ioctl; 2315 ifp->if_start = bge_start; 2316 #ifdef DEVICE_POLLING 2317 ifp->if_poll = bge_poll; 2318 #endif 2319 ifp->if_watchdog = bge_watchdog; 2320 ifp->if_init = bge_init; 2321 ifp->if_mtu = ETHERMTU; 2322 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2323 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2324 ifq_set_ready(&ifp->if_snd); 2325 2326 /* 2327 * 5700 B0 chips do not support checksumming correctly due 2328 * to hardware bugs. 2329 */ 2330 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) { 2331 ifp->if_capabilities |= IFCAP_HWCSUM; 2332 ifp->if_hwassist = BGE_CSUM_FEATURES; 2333 } 2334 ifp->if_capenable = ifp->if_capabilities; 2335 2336 /* 2337 * Figure out what sort of media we have by checking the 2338 * hardware config word in the first 32k of NIC internal memory, 2339 * or fall back to examining the EEPROM if necessary. 
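 * The NIC-memory copy is only trusted when the firmware has posted
 * BGE_MAGIC_NUMBER at the GENCOMM signature location, which is what the
 * check below tests; otherwise we fall back to the EEPROM.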
2340 * Note: on some BCM5700 cards, this value appears to be unset. 2341 * If that's the case, we have to rely on identifying the NIC 2342 * by its PCI subsystem ID, as we do below for the SysKonnect 2343 * SK-9D41. 2344 */ 2345 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2346 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2347 } else { 2348 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2349 sizeof(hwcfg))) { 2350 device_printf(dev, "failed to read EEPROM\n"); 2351 error = ENXIO; 2352 goto fail; 2353 } 2354 hwcfg = ntohl(hwcfg); 2355 } 2356 2357 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2358 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2359 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 2360 if (BGE_IS_5714_FAMILY(sc)) 2361 sc->bge_flags |= BGE_FLAG_MII_SERDES; 2362 else 2363 sc->bge_flags |= BGE_FLAG_TBI; 2364 } 2365 2366 /* Setup MI MODE */ 2367 if (sc->bge_flags & BGE_FLAG_CPMU) 2368 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST; 2369 else 2370 sc->bge_mi_mode = BGE_MIMODE_BASE; 2371 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) { 2372 /* Enable auto polling for BCM570[0-5]. */ 2373 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL; 2374 } 2375 2376 /* Setup link status update stuffs */ 2377 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2378 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 2379 sc->bge_link_upd = bge_bcm5700_link_upd; 2380 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT; 2381 } else if (sc->bge_flags & BGE_FLAG_TBI) { 2382 sc->bge_link_upd = bge_tbi_link_upd; 2383 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2384 } else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 2385 sc->bge_link_upd = bge_autopoll_link_upd; 2386 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2387 } else { 2388 sc->bge_link_upd = bge_copper_link_upd; 2389 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2390 } 2391 2392 /* 2393 * Broadcom's own driver always assumes the internal 2394 * PHY is at GMII address 1. On some chips, the PHY responds 2395 * to accesses at all addresses, which could cause us to 2396 * bogusly attach the PHY 32 times at probe type. Always 2397 * restricting the lookup to address 1 is simpler than 2398 * trying to figure out which chips revisions should be 2399 * special-cased. 2400 */ 2401 sc->bge_phyno = 1; 2402 2403 if (sc->bge_flags & BGE_FLAG_TBI) { 2404 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2405 bge_ifmedia_upd, bge_ifmedia_sts); 2406 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2407 ifmedia_add(&sc->bge_ifmedia, 2408 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2409 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2410 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2411 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2412 } else { 2413 struct mii_probe_args mii_args; 2414 2415 mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts); 2416 mii_args.mii_probemask = 1 << sc->bge_phyno; 2417 mii_args.mii_capmask = capmask; 2418 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2419 mii_args.mii_priv = mii_priv; 2420 2421 error = mii_probe(dev, &sc->bge_miibus, &mii_args); 2422 if (error) { 2423 device_printf(dev, "MII without any PHY!\n"); 2424 goto fail; 2425 } 2426 } 2427 2428 /* 2429 * Create sysctl nodes. 
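 * The nodes are rooted at hw.<nameunit> and expose the coalescing
 * parameters initialized above, e.g. (illustrative values only):
 *   sysctl hw.bge0.rx_coal_ticks=150
 *   sysctl hw.bge0.rx_coal_bds=64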
*/ 2431 sysctl_ctx_init(&sc->bge_sysctl_ctx); 2432 sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx, 2433 SYSCTL_STATIC_CHILDREN(_hw), 2434 OID_AUTO, 2435 device_get_nameunit(dev), 2436 CTLFLAG_RD, 0, ""); 2437 if (sc->bge_sysctl_tree == NULL) { 2438 device_printf(dev, "can't add sysctl node\n"); 2439 error = ENXIO; 2440 goto fail; 2441 } 2442 2443 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2444 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2445 OID_AUTO, "rx_coal_ticks", 2446 CTLTYPE_INT | CTLFLAG_RW, 2447 sc, 0, bge_sysctl_rx_coal_ticks, "I", 2448 "Receive coalescing ticks (usec)."); 2449 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2450 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2451 OID_AUTO, "tx_coal_ticks", 2452 CTLTYPE_INT | CTLFLAG_RW, 2453 sc, 0, bge_sysctl_tx_coal_ticks, "I", 2454 "Transmit coalescing ticks (usec)."); 2455 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2456 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2457 OID_AUTO, "rx_coal_bds", 2458 CTLTYPE_INT | CTLFLAG_RW, 2459 sc, 0, bge_sysctl_rx_coal_bds, "I", 2460 "Receive max coalesced BD count."); 2461 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2462 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2463 OID_AUTO, "tx_coal_bds", 2464 CTLTYPE_INT | CTLFLAG_RW, 2465 sc, 0, bge_sysctl_tx_coal_bds, "I", 2466 "Transmit max coalesced BD count."); 2467 if (sc->bge_flags & BGE_FLAG_PCIE) { 2468 /* 2469 * A common design characteristic for many Broadcom 2470 * client controllers is that they only support a 2471 * single outstanding DMA read operation on the PCIe 2472 * bus. This means that it will take twice as long to 2473 * fetch a TX frame that is split into header and 2474 * payload buffers as it does to fetch a single, 2475 * contiguous TX frame (2 reads vs. 1 read). For these 2476 * controllers, coalescing buffers to reduce the number 2477 * of memory reads is an effective way to get maximum 2478 * performance (about 940Mbps). Without collapsing TX 2479 * buffers the maximum TCP bulk transfer performance 2480 * is about 850Mbps. However, forcibly coalescing mbufs 2481 * consumes a lot of CPU cycles, so leave it off by 2482 * default. 2483 */ 2484 SYSCTL_ADD_INT(&sc->bge_sysctl_ctx, 2485 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2486 OID_AUTO, "force_defrag", CTLFLAG_RW, 2487 &sc->bge_force_defrag, 0, 2488 "Force defragment on TX path"); 2489 } 2490 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 2491 if (!BGE_IS_5705_PLUS(sc)) { 2492 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2493 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2494 "rx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW, 2495 sc, 0, bge_sysctl_rx_coal_ticks_int, "I", 2496 "Receive coalescing ticks " 2497 "during interrupt (usec)."); 2498 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2499 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2500 "tx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW, 2501 sc, 0, bge_sysctl_tx_coal_ticks_int, "I", 2502 "Transmit coalescing ticks " 2503 "during interrupt (usec)."); 2504 } 2505 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2506 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2507 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2508 sc, 0, bge_sysctl_rx_coal_bds_int, "I", 2509 "Receive max coalesced BD count during interrupt."); 2510 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2511 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2512 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2513 sc, 0, bge_sysctl_tx_coal_bds_int, "I", 2514 "Transmit max coalesced BD count during interrupt."); 2515 } 2516 2517 /* 2518 * Call MI attach routine.
2519 */ 2520 ether_ifattach(ifp, ether_addr, NULL); 2521 2522 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) { 2523 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 2524 intr_func = bge_msi_oneshot; 2525 if (bootverbose) 2526 device_printf(dev, "oneshot MSI\n"); 2527 } else { 2528 intr_func = bge_msi; 2529 } 2530 } else if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 2531 intr_func = bge_intr_legacy; 2532 } else { 2533 intr_func = bge_intr_crippled; 2534 } 2535 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, intr_func, sc, 2536 &sc->bge_intrhand, ifp->if_serializer); 2537 if (error) { 2538 ether_ifdetach(ifp); 2539 device_printf(dev, "couldn't set up irq\n"); 2540 goto fail; 2541 } 2542 2543 ifp->if_cpuid = rman_get_cpuid(sc->bge_irq); 2544 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 2545 2546 return(0); 2547 fail: 2548 bge_detach(dev); 2549 return(error); 2550 } 2551 2552 static int 2553 bge_detach(device_t dev) 2554 { 2555 struct bge_softc *sc = device_get_softc(dev); 2556 2557 if (device_is_attached(dev)) { 2558 struct ifnet *ifp = &sc->arpcom.ac_if; 2559 2560 lwkt_serialize_enter(ifp->if_serializer); 2561 bge_stop(sc); 2562 bge_reset(sc); 2563 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2564 lwkt_serialize_exit(ifp->if_serializer); 2565 2566 ether_ifdetach(ifp); 2567 } 2568 2569 if (sc->bge_flags & BGE_FLAG_TBI) 2570 ifmedia_removeall(&sc->bge_ifmedia); 2571 if (sc->bge_miibus) 2572 device_delete_child(dev, sc->bge_miibus); 2573 bus_generic_detach(dev); 2574 2575 if (sc->bge_irq != NULL) { 2576 bus_release_resource(dev, SYS_RES_IRQ, sc->bge_irq_rid, 2577 sc->bge_irq); 2578 } 2579 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) 2580 pci_release_msi(dev); 2581 2582 if (sc->bge_res != NULL) { 2583 bus_release_resource(dev, SYS_RES_MEMORY, 2584 BGE_PCI_BAR0, sc->bge_res); 2585 } 2586 2587 if (sc->bge_sysctl_tree != NULL) 2588 sysctl_ctx_free(&sc->bge_sysctl_ctx); 2589 2590 bge_dma_free(sc); 2591 2592 return 0; 2593 } 2594 2595 static void 2596 bge_reset(struct bge_softc *sc) 2597 { 2598 device_t dev; 2599 uint32_t cachesize, command, pcistate, reset; 2600 void (*write_op)(struct bge_softc *, uint32_t, uint32_t); 2601 int i, val = 0; 2602 2603 dev = sc->bge_dev; 2604 2605 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 2606 sc->bge_asicrev != BGE_ASICREV_BCM5906) { 2607 if (sc->bge_flags & BGE_FLAG_PCIE) 2608 write_op = bge_writemem_direct; 2609 else 2610 write_op = bge_writemem_ind; 2611 } else { 2612 write_op = bge_writereg_ind; 2613 } 2614 2615 /* Save some important PCI state. */ 2616 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2617 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2618 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2619 2620 pci_write_config(dev, BGE_PCI_MISC_CTL, 2621 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2622 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2623 sc->bge_pci_miscctl, 4); 2624 2625 /* Disable fastboot on controllers that support it. */ 2626 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 2627 BGE_IS_5755_PLUS(sc)) { 2628 if (bootverbose) 2629 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); 2630 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2631 } 2632 2633 /* 2634 * Write the magic number to SRAM at offset 0xB50. 2635 * When firmware finishes its initialization it will 2636 * write ~BGE_MAGIC_NUMBER to the same location. 2637 */ 2638 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2639 2640 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2641 2642 /* XXX: Broadcom Linux driver. 
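 * The PCIe fixups below (dropping back to PCI-E 1.0a signalling on chips
 * other than the BCM5785 and setting bit 29 of BGE_MISC_CFG to hold off
 * link training during the global reset) are carried over from the
 * vendor Linux driver rather than from the public documentation.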
*/ 2643 if (sc->bge_flags & BGE_FLAG_PCIE) { 2644 /* Force PCI-E 1.0a mode */ 2645 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 && 2646 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == 2647 (BGE_PCIE_PHY_TSTCTL_PSCRAM | 2648 BGE_PCIE_PHY_TSTCTL_PCIE10)) { 2649 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL, 2650 BGE_PCIE_PHY_TSTCTL_PSCRAM); 2651 } 2652 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2653 /* Prevent PCIE link training during global reset */ 2654 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2655 reset |= (1<<29); 2656 } 2657 } 2658 2659 /* 2660 * Set GPHY Power Down Override to leave GPHY 2661 * powered up in D0 uninitialized. 2662 */ 2663 if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU) == 0) 2664 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 2665 2666 /* Issue global reset */ 2667 write_op(sc, BGE_MISC_CFG, reset); 2668 2669 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2670 uint32_t status, ctrl; 2671 2672 status = CSR_READ_4(sc, BGE_VCPU_STATUS); 2673 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 2674 status | BGE_VCPU_STATUS_DRV_RESET); 2675 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 2676 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 2677 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 2678 } 2679 2680 DELAY(1000); 2681 2682 /* XXX: Broadcom Linux driver. */ 2683 if (sc->bge_flags & BGE_FLAG_PCIE) { 2684 uint16_t devctl; 2685 2686 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2687 uint32_t v; 2688 2689 DELAY(500000); /* wait for link training to complete */ 2690 v = pci_read_config(dev, 0xc4, 4); 2691 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2692 } 2693 2694 devctl = pci_read_config(dev, 2695 sc->bge_pciecap + PCIER_DEVCTRL, 2); 2696 2697 /* Disable no snoop and disable relaxed ordering. */ 2698 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP); 2699 2700 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */ 2701 if ((sc->bge_flags & BGE_FLAG_CPMU) == 0) { 2702 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK; 2703 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128; 2704 } 2705 2706 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVCTRL, 2707 devctl, 2); 2708 2709 /* Clear error status. */ 2710 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVSTS, 2711 PCIEM_DEVSTS_CORR_ERR | 2712 PCIEM_DEVSTS_NFATAL_ERR | 2713 PCIEM_DEVSTS_FATAL_ERR | 2714 PCIEM_DEVSTS_UNSUPP_REQ, 2); 2715 } 2716 2717 /* Reset some of the PCI state that got zapped by reset */ 2718 pci_write_config(dev, BGE_PCI_MISC_CTL, 2719 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2720 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2721 sc->bge_pci_miscctl, 4); 2722 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2723 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2724 write_op(sc, BGE_MISC_CFG, (65 << 1)); 2725 2726 /* 2727 * Disable PCI-X relaxed ordering to ensure status block update 2728 * comes first then packet buffer DMA. Otherwise driver may 2729 * read stale status block. 2730 */ 2731 if (sc->bge_flags & BGE_FLAG_PCIX) { 2732 uint16_t devctl; 2733 2734 devctl = pci_read_config(dev, 2735 sc->bge_pcixcap + PCIXR_COMMAND, 2); 2736 devctl &= ~PCIXM_COMMAND_ERO; 2737 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { 2738 devctl &= ~PCIXM_COMMAND_MAX_READ; 2739 devctl |= PCIXM_COMMAND_MAX_READ_2048; 2740 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 2741 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS | 2742 PCIXM_COMMAND_MAX_READ); 2743 devctl |= PCIXM_COMMAND_MAX_READ_2048; 2744 } 2745 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, 2746 devctl, 2); 2747 } 2748 2749 /* 2750 * Enable memory arbiter and re-enable MSI if necessary. 
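 * A reset on the 5714 family clears the MSI enable bit in both the PCI
 * MSI capability and the chip's BGE_MSI_MODE register, so both are put
 * back below before the memory arbiter is turned on again.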
2751 */ 2752 if (BGE_IS_5714_FAMILY(sc)) { 2753 uint32_t val; 2754 2755 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) { 2756 /* 2757 * Resetting BCM5714 family will clear MSI 2758 * enable bit; restore it after resetting. 2759 */ 2760 PCI_SETBIT(sc->bge_dev, sc->bge_msicap + PCIR_MSI_CTRL, 2761 PCIM_MSICTRL_MSI_ENABLE, 2); 2762 BGE_SETBIT(sc, BGE_MSI_MODE, BGE_MSIMODE_ENABLE); 2763 } 2764 val = CSR_READ_4(sc, BGE_MARB_MODE); 2765 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2766 } else { 2767 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2768 } 2769 2770 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2771 for (i = 0; i < BGE_TIMEOUT; i++) { 2772 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 2773 if (val & BGE_VCPU_STATUS_INIT_DONE) 2774 break; 2775 DELAY(100); 2776 } 2777 if (i == BGE_TIMEOUT) { 2778 if_printf(&sc->arpcom.ac_if, "reset timed out\n"); 2779 return; 2780 } 2781 } else { 2782 /* 2783 * Poll until we see the 1's complement of the magic number. 2784 * This indicates that the firmware initialization 2785 * is complete. 2786 */ 2787 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) { 2788 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2789 if (val == ~BGE_MAGIC_NUMBER) 2790 break; 2791 DELAY(10); 2792 } 2793 if (i == BGE_FIRMWARE_TIMEOUT) { 2794 if_printf(&sc->arpcom.ac_if, "firmware handshake " 2795 "timed out, found 0x%08x\n", val); 2796 return; 2797 } 2798 } 2799 2800 /* 2801 * XXX Wait for the value of the PCISTATE register to 2802 * return to its original pre-reset state. This is a 2803 * fairly good indicator of reset completion. If we don't 2804 * wait for the reset to fully complete, trying to read 2805 * from the device's non-PCI registers may yield garbage 2806 * results. 2807 */ 2808 for (i = 0; i < BGE_TIMEOUT; i++) { 2809 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2810 break; 2811 DELAY(10); 2812 } 2813 2814 /* Fix up byte swapping */ 2815 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 2816 BGE_MODECTL_BYTESWAP_DATA); 2817 2818 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2819 2820 /* 2821 * The 5704 in TBI mode apparently needs some special 2822 * adjustment to insure the SERDES drive level is set 2823 * to 1.2V. 2824 */ 2825 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 2826 (sc->bge_flags & BGE_FLAG_TBI)) { 2827 uint32_t serdescfg; 2828 2829 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2830 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2831 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2832 } 2833 2834 /* XXX: Broadcom Linux driver. */ 2835 if ((sc->bge_flags & BGE_FLAG_PCIE) && 2836 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 2837 sc->bge_asicrev != BGE_ASICREV_BCM5785) { 2838 uint32_t v; 2839 2840 /* Enable Data FIFO protection. */ 2841 v = CSR_READ_4(sc, 0x7c00); 2842 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 2843 } 2844 2845 DELAY(10000); 2846 } 2847 2848 /* 2849 * Frame reception handling. This is called if there's a frame 2850 * on the receive return list. 
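 *
 * The caller passes in the producer index it read from the status block;
 * we consume entries until our saved consumer index catches up with it
 * and then write the consumer index back to the return ring mailbox.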
2851 * 2852 * Note: we have to be able to handle two possibilities here: 2853 * 1) the frame is from the jumbo recieve ring 2854 * 2) the frame is from the standard receive ring 2855 */ 2856 2857 static void 2858 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod) 2859 { 2860 struct ifnet *ifp; 2861 int stdcnt = 0, jumbocnt = 0; 2862 2863 ifp = &sc->arpcom.ac_if; 2864 2865 while (sc->bge_rx_saved_considx != rx_prod) { 2866 struct bge_rx_bd *cur_rx; 2867 uint32_t rxidx; 2868 struct mbuf *m = NULL; 2869 uint16_t vlan_tag = 0; 2870 int have_tag = 0; 2871 2872 cur_rx = 2873 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2874 2875 rxidx = cur_rx->bge_idx; 2876 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2877 logif(rx_pkt); 2878 2879 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2880 have_tag = 1; 2881 vlan_tag = cur_rx->bge_vlan_tag; 2882 } 2883 2884 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2885 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2886 jumbocnt++; 2887 2888 if (rxidx != sc->bge_jumbo) { 2889 ifp->if_ierrors++; 2890 if_printf(ifp, "sw jumbo index(%d) " 2891 "and hw jumbo index(%d) mismatch, drop!\n", 2892 sc->bge_jumbo, rxidx); 2893 bge_setup_rxdesc_jumbo(sc, rxidx); 2894 continue; 2895 } 2896 2897 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf; 2898 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2899 ifp->if_ierrors++; 2900 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo); 2901 continue; 2902 } 2903 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) { 2904 ifp->if_ierrors++; 2905 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo); 2906 continue; 2907 } 2908 } else { 2909 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2910 stdcnt++; 2911 2912 if (rxidx != sc->bge_std) { 2913 ifp->if_ierrors++; 2914 if_printf(ifp, "sw std index(%d) " 2915 "and hw std index(%d) mismatch, drop!\n", 2916 sc->bge_std, rxidx); 2917 bge_setup_rxdesc_std(sc, rxidx); 2918 continue; 2919 } 2920 2921 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf; 2922 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2923 ifp->if_ierrors++; 2924 bge_setup_rxdesc_std(sc, sc->bge_std); 2925 continue; 2926 } 2927 if (bge_newbuf_std(sc, sc->bge_std, 0)) { 2928 ifp->if_ierrors++; 2929 bge_setup_rxdesc_std(sc, sc->bge_std); 2930 continue; 2931 } 2932 } 2933 2934 ifp->if_ipackets++; 2935 #if !defined(__i386__) && !defined(__x86_64__) 2936 /* 2937 * The x86 allows unaligned accesses, but for other 2938 * platforms we must make sure the payload is aligned. 2939 */ 2940 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 2941 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2942 cur_rx->bge_len); 2943 m->m_data += ETHER_ALIGN; 2944 } 2945 #endif 2946 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2947 m->m_pkthdr.rcvif = ifp; 2948 2949 if (ifp->if_capenable & IFCAP_RXCSUM) { 2950 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2951 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2952 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2953 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2954 } 2955 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) && 2956 m->m_pkthdr.len >= BGE_MIN_FRAMELEN) { 2957 m->m_pkthdr.csum_data = 2958 cur_rx->bge_tcp_udp_csum; 2959 m->m_pkthdr.csum_flags |= 2960 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2961 } 2962 } 2963 2964 /* 2965 * If we received a packet with a vlan tag, pass it 2966 * to vlan_input() instead of ether_input(). 
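 * (Here that simply means setting M_VLANTAG and the tag value on the
 * mbuf; ifp->if_input() takes care of dispatching it to the right
 * input path.)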
2967 */ 2968 if (have_tag) { 2969 m->m_flags |= M_VLANTAG; 2970 m->m_pkthdr.ether_vlantag = vlan_tag; 2971 have_tag = vlan_tag = 0; 2972 } 2973 ifp->if_input(ifp, m); 2974 } 2975 2976 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2977 if (stdcnt) 2978 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2979 if (jumbocnt) 2980 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2981 } 2982 2983 static void 2984 bge_txeof(struct bge_softc *sc, uint16_t tx_cons) 2985 { 2986 struct bge_tx_bd *cur_tx = NULL; 2987 struct ifnet *ifp; 2988 2989 ifp = &sc->arpcom.ac_if; 2990 2991 /* 2992 * Go through our tx ring and free mbufs for those 2993 * frames that have been sent. 2994 */ 2995 while (sc->bge_tx_saved_considx != tx_cons) { 2996 uint32_t idx = 0; 2997 2998 idx = sc->bge_tx_saved_considx; 2999 cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 3000 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3001 ifp->if_opackets++; 3002 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 3003 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 3004 sc->bge_cdata.bge_tx_dmamap[idx]); 3005 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 3006 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3007 } 3008 sc->bge_txcnt--; 3009 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3010 logif(tx_pkt); 3011 } 3012 3013 if (cur_tx != NULL && 3014 (BGE_TX_RING_CNT - sc->bge_txcnt) >= 3015 (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) 3016 ifp->if_flags &= ~IFF_OACTIVE; 3017 3018 if (sc->bge_txcnt == 0) 3019 ifp->if_timer = 0; 3020 3021 if (!ifq_is_empty(&ifp->if_snd)) 3022 if_devstart(ifp); 3023 } 3024 3025 #ifdef DEVICE_POLLING 3026 3027 static void 3028 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3029 { 3030 struct bge_softc *sc = ifp->if_softc; 3031 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3032 uint16_t rx_prod, tx_cons; 3033 3034 switch(cmd) { 3035 case POLL_REGISTER: 3036 bge_disable_intr(sc); 3037 break; 3038 case POLL_DEREGISTER: 3039 bge_enable_intr(sc); 3040 break; 3041 case POLL_AND_CHECK_STATUS: 3042 /* 3043 * Process link state changes. 3044 */ 3045 bge_link_poll(sc); 3046 /* Fall through */ 3047 case POLL_ONLY: 3048 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 3049 sc->bge_status_tag = sblk->bge_status_tag; 3050 /* 3051 * Use a load fence to ensure that status_tag 3052 * is saved before rx_prod and tx_cons. 3053 */ 3054 cpu_lfence(); 3055 } 3056 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3057 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3058 if (ifp->if_flags & IFF_RUNNING) { 3059 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3060 if (sc->bge_rx_saved_considx != rx_prod) 3061 bge_rxeof(sc, rx_prod); 3062 3063 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3064 if (sc->bge_tx_saved_considx != tx_cons) 3065 bge_txeof(sc, tx_cons); 3066 } 3067 break; 3068 } 3069 } 3070 3071 #endif 3072 3073 static void 3074 bge_intr_crippled(void *xsc) 3075 { 3076 struct bge_softc *sc = xsc; 3077 struct ifnet *ifp = &sc->arpcom.ac_if; 3078 3079 logif(intr); 3080 3081 /* 3082 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't 3083 * disable interrupts by writing nonzero like we used to, since with 3084 * our current organization this just gives complications and 3085 * pessimizations for re-enabling interrupts. We used to have races 3086 * instead of the necessary complications. 
Disabling interrupts 3087 * would just reduce the chance of a status update while we are 3088 * running (by switching to the interrupt-mode coalescence 3089 * parameters), but this chance is already very low so it is more 3090 * efficient to get another interrupt than prevent it. 3091 * 3092 * We do the ack first to ensure another interrupt if there is a 3093 * status update after the ack. We don't check for the status 3094 * changing later because it is more efficient to get another 3095 * interrupt than prevent it, not quite as above (not checking is 3096 * a smaller optimization than not toggling the interrupt enable, 3097 * since checking doesn't involve PCI accesses and toggling require 3098 * the status check). So toggling would probably be a pessimization 3099 * even with MSI. It would only be needed for using a task queue. 3100 */ 3101 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3102 3103 /* 3104 * Process link state changes. 3105 */ 3106 bge_link_poll(sc); 3107 3108 if (ifp->if_flags & IFF_RUNNING) { 3109 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3110 uint16_t rx_prod, tx_cons; 3111 3112 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3113 if (sc->bge_rx_saved_considx != rx_prod) 3114 bge_rxeof(sc, rx_prod); 3115 3116 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3117 if (sc->bge_tx_saved_considx != tx_cons) 3118 bge_txeof(sc, tx_cons); 3119 } 3120 3121 if (sc->bge_coal_chg) 3122 bge_coal_change(sc); 3123 } 3124 3125 static void 3126 bge_intr_legacy(void *xsc) 3127 { 3128 struct bge_softc *sc = xsc; 3129 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3130 3131 if (sc->bge_status_tag == sblk->bge_status_tag) { 3132 uint32_t val; 3133 3134 val = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4); 3135 if (val & BGE_PCISTAT_INTR_NOTACT) 3136 return; 3137 } 3138 3139 /* 3140 * NOTE: 3141 * Interrupt will have to be disabled if tagged status 3142 * is used, else interrupt will always be asserted on 3143 * certain chips (at least on BCM5750 AX/BX). 3144 */ 3145 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3146 3147 bge_intr(sc); 3148 } 3149 3150 static void 3151 bge_msi(void *xsc) 3152 { 3153 struct bge_softc *sc = xsc; 3154 3155 /* Disable interrupt first */ 3156 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3157 bge_intr(sc); 3158 } 3159 3160 static void 3161 bge_msi_oneshot(void *xsc) 3162 { 3163 bge_intr(xsc); 3164 } 3165 3166 static void 3167 bge_intr(struct bge_softc *sc) 3168 { 3169 struct ifnet *ifp = &sc->arpcom.ac_if; 3170 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3171 uint16_t rx_prod, tx_cons; 3172 uint32_t status; 3173 3174 sc->bge_status_tag = sblk->bge_status_tag; 3175 /* 3176 * Use a load fence to ensure that status_tag is saved 3177 * before rx_prod, tx_cons and status. 
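 * The saved tag is echoed back in the interrupt ack below; if the index
 * loads were allowed to pass the tag load we could end up acking a tag
 * newer than the work we actually processed and miss an update, whereas
 * a stale tag merely costs an extra interrupt.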
3178 */ 3179 cpu_lfence(); 3180 3181 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3182 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3183 status = sblk->bge_status; 3184 3185 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bge_link_evt) 3186 bge_link_poll(sc); 3187 3188 if (ifp->if_flags & IFF_RUNNING) { 3189 if (sc->bge_rx_saved_considx != rx_prod) 3190 bge_rxeof(sc, rx_prod); 3191 3192 if (sc->bge_tx_saved_considx != tx_cons) 3193 bge_txeof(sc, tx_cons); 3194 } 3195 3196 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24); 3197 3198 if (sc->bge_coal_chg) 3199 bge_coal_change(sc); 3200 } 3201 3202 static void 3203 bge_tick(void *xsc) 3204 { 3205 struct bge_softc *sc = xsc; 3206 struct ifnet *ifp = &sc->arpcom.ac_if; 3207 3208 lwkt_serialize_enter(ifp->if_serializer); 3209 3210 if (BGE_IS_5705_PLUS(sc)) 3211 bge_stats_update_regs(sc); 3212 else 3213 bge_stats_update(sc); 3214 3215 if (sc->bge_flags & BGE_FLAG_TBI) { 3216 /* 3217 * Since in TBI mode auto-polling can't be used we should poll 3218 * link status manually. Here we register pending link event 3219 * and trigger interrupt. 3220 */ 3221 sc->bge_link_evt++; 3222 if (BGE_IS_CRIPPLED(sc)) 3223 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3224 else 3225 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3226 } else if (!sc->bge_link) { 3227 mii_tick(device_get_softc(sc->bge_miibus)); 3228 } 3229 3230 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 3231 3232 lwkt_serialize_exit(ifp->if_serializer); 3233 } 3234 3235 static void 3236 bge_stats_update_regs(struct bge_softc *sc) 3237 { 3238 struct ifnet *ifp = &sc->arpcom.ac_if; 3239 struct bge_mac_stats_regs stats; 3240 uint32_t *s; 3241 int i; 3242 3243 s = (uint32_t *)&stats; 3244 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3245 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3246 s++; 3247 } 3248 3249 ifp->if_collisions += 3250 (stats.dot3StatsSingleCollisionFrames + 3251 stats.dot3StatsMultipleCollisionFrames + 3252 stats.dot3StatsExcessiveCollisions + 3253 stats.dot3StatsLateCollisions) - 3254 ifp->if_collisions; 3255 } 3256 3257 static void 3258 bge_stats_update(struct bge_softc *sc) 3259 { 3260 struct ifnet *ifp = &sc->arpcom.ac_if; 3261 bus_size_t stats; 3262 3263 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3264 3265 #define READ_STAT(sc, stats, stat) \ 3266 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3267 3268 ifp->if_collisions += 3269 (READ_STAT(sc, stats, 3270 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) + 3271 READ_STAT(sc, stats, 3272 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3273 READ_STAT(sc, stats, 3274 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) + 3275 READ_STAT(sc, stats, 3276 txstats.dot3StatsLateCollisions.bge_addr_lo)) - 3277 ifp->if_collisions; 3278 3279 #undef READ_STAT 3280 3281 #ifdef notdef 3282 ifp->if_collisions += 3283 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3284 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3285 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3286 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3287 ifp->if_collisions; 3288 #endif 3289 } 3290 3291 /* 3292 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3293 * pointers to descriptors. 
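 *
 * On success *txidx is advanced one past the last descriptor used, and
 * the mbuf together with the dmamap that loaded it ends up recorded at
 * that last descriptor's slot, which is where bge_txeof() later looks
 * for them.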
3294 */ 3295 static int 3296 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx) 3297 { 3298 struct bge_tx_bd *d = NULL; 3299 uint16_t csum_flags = 0; 3300 bus_dma_segment_t segs[BGE_NSEG_NEW]; 3301 bus_dmamap_t map; 3302 int error, maxsegs, nsegs, idx, i; 3303 struct mbuf *m_head = *m_head0, *m_new; 3304 3305 if (m_head->m_pkthdr.csum_flags) { 3306 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3307 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3308 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 3309 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3310 if (m_head->m_flags & M_LASTFRAG) 3311 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3312 else if (m_head->m_flags & M_FRAG) 3313 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3314 } 3315 3316 idx = *txidx; 3317 map = sc->bge_cdata.bge_tx_dmamap[idx]; 3318 3319 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - BGE_NSEG_RSVD; 3320 KASSERT(maxsegs >= BGE_NSEG_SPARE, 3321 ("not enough segments %d", maxsegs)); 3322 3323 if (maxsegs > BGE_NSEG_NEW) 3324 maxsegs = BGE_NSEG_NEW; 3325 3326 /* 3327 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason. 3328 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN, 3329 * but when such padded frames employ the bge IP/TCP checksum 3330 * offload, the hardware checksum assist gives incorrect results 3331 * (possibly from incorporating its own padding into the UDP/TCP 3332 * checksum; who knows). If we pad such runts with zeros, the 3333 * onboard checksum comes out correct. 3334 */ 3335 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && 3336 m_head->m_pkthdr.len < BGE_MIN_FRAMELEN) { 3337 error = m_devpad(m_head, BGE_MIN_FRAMELEN); 3338 if (error) 3339 goto back; 3340 } 3341 3342 if ((sc->bge_flags & BGE_FLAG_SHORTDMA) && m_head->m_next != NULL) { 3343 m_new = bge_defrag_shortdma(m_head); 3344 if (m_new == NULL) { 3345 error = ENOBUFS; 3346 goto back; 3347 } 3348 *m_head0 = m_head = m_new; 3349 } 3350 if (sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) && 3351 m_head->m_next != NULL) { 3352 /* 3353 * Forcefully defragment mbuf chain to overcome hardware 3354 * limitation which only support a single outstanding 3355 * DMA read operation. If it fails, keep moving on using 3356 * the original mbuf chain. 3357 */ 3358 m_new = m_defrag(m_head, MB_DONTWAIT); 3359 if (m_new != NULL) 3360 *m_head0 = m_head = m_new; 3361 } 3362 3363 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map, 3364 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3365 if (error) 3366 goto back; 3367 3368 m_head = *m_head0; 3369 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3370 3371 for (i = 0; ; i++) { 3372 d = &sc->bge_ldata.bge_tx_ring[idx]; 3373 3374 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3375 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3376 d->bge_len = segs[i].ds_len; 3377 d->bge_flags = csum_flags; 3378 3379 if (i == nsegs - 1) 3380 break; 3381 BGE_INC(idx, BGE_TX_RING_CNT); 3382 } 3383 /* Mark the last segment as end of packet... */ 3384 d->bge_flags |= BGE_TXBDFLAG_END; 3385 3386 /* Set vlan tag to the first segment of the packet. */ 3387 d = &sc->bge_ldata.bge_tx_ring[*txidx]; 3388 if (m_head->m_flags & M_VLANTAG) { 3389 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3390 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag; 3391 } else { 3392 d->bge_vlan_tag = 0; 3393 } 3394 3395 /* 3396 * Insure that the map for this transmission is placed at 3397 * the array index of the last descriptor in this chain. 
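 *
 * bge_txeof() unloads bge_tx_dmamap[i] at the index i where the mbuf
 * pointer is stored, i.e. at the last descriptor, so the map that loaded
 * this chain must live in that slot; the spare map found there is moved
 * back to *txidx for the next transmission.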
3398 */ 3399 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 3400 sc->bge_cdata.bge_tx_dmamap[idx] = map; 3401 sc->bge_cdata.bge_tx_chain[idx] = m_head; 3402 sc->bge_txcnt += nsegs; 3403 3404 BGE_INC(idx, BGE_TX_RING_CNT); 3405 *txidx = idx; 3406 back: 3407 if (error) { 3408 m_freem(*m_head0); 3409 *m_head0 = NULL; 3410 } 3411 return error; 3412 } 3413 3414 /* 3415 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3416 * to the mbuf data regions directly in the transmit descriptors. 3417 */ 3418 static void 3419 bge_start(struct ifnet *ifp) 3420 { 3421 struct bge_softc *sc = ifp->if_softc; 3422 struct mbuf *m_head = NULL; 3423 uint32_t prodidx; 3424 int need_trans; 3425 3426 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 3427 return; 3428 3429 prodidx = sc->bge_tx_prodidx; 3430 3431 need_trans = 0; 3432 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3433 m_head = ifq_dequeue(&ifp->if_snd, NULL); 3434 if (m_head == NULL) 3435 break; 3436 3437 /* 3438 * XXX 3439 * The code inside the if() block is never reached since we 3440 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3441 * requests to checksum TCP/UDP in a fragmented packet. 3442 * 3443 * XXX 3444 * safety overkill. If this is a fragmented packet chain 3445 * with delayed TCP/UDP checksums, then only encapsulate 3446 * it if we have enough descriptors to handle the entire 3447 * chain at once. 3448 * (paranoia -- may not actually be needed) 3449 */ 3450 if ((m_head->m_flags & M_FIRSTFRAG) && 3451 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) { 3452 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3453 m_head->m_pkthdr.csum_data + BGE_NSEG_RSVD) { 3454 ifp->if_flags |= IFF_OACTIVE; 3455 ifq_prepend(&ifp->if_snd, m_head); 3456 break; 3457 } 3458 } 3459 3460 /* 3461 * Sanity check: avoid coming within BGE_NSEG_RSVD 3462 * descriptors of the end of the ring. Also make 3463 * sure there are BGE_NSEG_SPARE descriptors for 3464 * jumbo buffers' defragmentation. 3465 */ 3466 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3467 (BGE_NSEG_RSVD + BGE_NSEG_SPARE)) { 3468 ifp->if_flags |= IFF_OACTIVE; 3469 ifq_prepend(&ifp->if_snd, m_head); 3470 break; 3471 } 3472 3473 /* 3474 * Pack the data into the transmit ring. If we 3475 * don't have room, set the OACTIVE flag and wait 3476 * for the NIC to drain the ring. 3477 */ 3478 if (bge_encap(sc, &m_head, &prodidx)) { 3479 ifp->if_flags |= IFF_OACTIVE; 3480 ifp->if_oerrors++; 3481 break; 3482 } 3483 need_trans = 1; 3484 3485 ETHER_BPF_MTAP(ifp, m_head); 3486 } 3487 3488 if (!need_trans) 3489 return; 3490 3491 /* Transmit */ 3492 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3493 /* 5700 b2 errata */ 3494 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 3495 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3496 3497 sc->bge_tx_prodidx = prodidx; 3498 3499 /* 3500 * Set a timeout in case the chip goes out to lunch. 3501 */ 3502 ifp->if_timer = 5; 3503 } 3504 3505 static void 3506 bge_init(void *xsc) 3507 { 3508 struct bge_softc *sc = xsc; 3509 struct ifnet *ifp = &sc->arpcom.ac_if; 3510 uint16_t *m; 3511 uint32_t mode; 3512 3513 ASSERT_SERIALIZED(ifp->if_serializer); 3514 3515 /* Cancel pending I/O and flush buffers. */ 3516 bge_stop(sc); 3517 bge_reset(sc); 3518 bge_chipinit(sc); 3519 3520 /* 3521 * Init the various state machines, ring 3522 * control blocks and firmware. 3523 */ 3524 if (bge_blockinit(sc)) { 3525 if_printf(ifp, "initialization failure\n"); 3526 bge_stop(sc); 3527 return; 3528 } 3529 3530 /* Specify MTU. 
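 * For the default MTU of 1500 this programs 1500 + 14 (Ethernet header)
 * + 4 (CRC) + 4 (802.1Q tag) = 1522 bytes into BGE_RX_MTU.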
*/ 3531 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3532 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 3533 3534 /* Load our MAC address. */ 3535 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 3536 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3537 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3538 3539 /* Enable or disable promiscuous mode as needed. */ 3540 bge_setpromisc(sc); 3541 3542 /* Program multicast filter. */ 3543 bge_setmulti(sc); 3544 3545 /* Init RX ring. */ 3546 if (bge_init_rx_ring_std(sc)) { 3547 if_printf(ifp, "RX ring initialization failed\n"); 3548 bge_stop(sc); 3549 return; 3550 } 3551 3552 /* 3553 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 3554 * memory to insure that the chip has in fact read the first 3555 * entry of the ring. 3556 */ 3557 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 3558 uint32_t v, i; 3559 for (i = 0; i < 10; i++) { 3560 DELAY(20); 3561 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 3562 if (v == (MCLBYTES - ETHER_ALIGN)) 3563 break; 3564 } 3565 if (i == 10) 3566 if_printf(ifp, "5705 A0 chip failed to load RX ring\n"); 3567 } 3568 3569 /* Init jumbo RX ring. */ 3570 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { 3571 if (bge_init_rx_ring_jumbo(sc)) { 3572 if_printf(ifp, "Jumbo RX ring initialization failed\n"); 3573 bge_stop(sc); 3574 return; 3575 } 3576 } 3577 3578 /* Init our RX return ring index */ 3579 sc->bge_rx_saved_considx = 0; 3580 3581 /* Init TX ring. */ 3582 bge_init_tx_ring(sc); 3583 3584 /* Enable TX MAC state machine lockup fix. */ 3585 mode = CSR_READ_4(sc, BGE_TX_MODE); 3586 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) 3587 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 3588 /* Turn on transmitter */ 3589 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 3590 3591 /* Turn on receiver */ 3592 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3593 3594 /* 3595 * Set the number of good frames to receive after RX MBUF 3596 * Low Watermark has been reached. After the RX MAC receives 3597 * this number of frames, it will drop subsequent incoming 3598 * frames until the MBUF High Watermark is reached. 3599 */ 3600 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3601 3602 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) { 3603 if (bootverbose) { 3604 if_printf(ifp, "MSI_MODE: %#x\n", 3605 CSR_READ_4(sc, BGE_MSI_MODE)); 3606 } 3607 3608 /* 3609 * XXX 3610 * Linux driver turns it on for all chips supporting MSI?! 3611 */ 3612 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 3613 /* 3614 * XXX 3615 * According to 5722-PG101-R, 3616 * BGE_PCIE_TRANSACT_ONESHOT_MSI applies only to 3617 * BCM5906. 3618 */ 3619 BGE_SETBIT(sc, BGE_PCIE_TRANSACT, 3620 BGE_PCIE_TRANSACT_ONESHOT_MSI); 3621 } 3622 } 3623 3624 /* Tell firmware we're alive. */ 3625 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3626 3627 /* Enable host interrupts if polling(4) is not enabled. */ 3628 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4); 3629 #ifdef DEVICE_POLLING 3630 if (ifp->if_flags & IFF_POLLING) 3631 bge_disable_intr(sc); 3632 else 3633 #endif 3634 bge_enable_intr(sc); 3635 3636 bge_ifmedia_upd(ifp); 3637 3638 ifp->if_flags |= IFF_RUNNING; 3639 ifp->if_flags &= ~IFF_OACTIVE; 3640 3641 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 3642 } 3643 3644 /* 3645 * Set media options. 3646 */ 3647 static int 3648 bge_ifmedia_upd(struct ifnet *ifp) 3649 { 3650 struct bge_softc *sc = ifp->if_softc; 3651 3652 /* If this is a 1000baseX NIC, enable the TBI port. 
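 * TBI (fiber/SerDes) cards are programmed directly through the MAC and
 * SGDIG registers here; copper cards fall through to the else branch
 * below, which resets the PHYs and lets the miibus layer handle the
 * media change via mii_mediachg().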
*/ 3653 if (sc->bge_flags & BGE_FLAG_TBI) { 3654 struct ifmedia *ifm = &sc->bge_ifmedia; 3655 3656 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3657 return(EINVAL); 3658 3659 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3660 case IFM_AUTO: 3661 /* 3662 * The BCM5704 ASIC appears to have a special 3663 * mechanism for programming the autoneg 3664 * advertisement registers in TBI mode. 3665 */ 3666 if (!bge_fake_autoneg && 3667 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3668 uint32_t sgdig; 3669 3670 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 3671 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 3672 sgdig |= BGE_SGDIGCFG_AUTO | 3673 BGE_SGDIGCFG_PAUSE_CAP | 3674 BGE_SGDIGCFG_ASYM_PAUSE; 3675 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 3676 sgdig | BGE_SGDIGCFG_SEND); 3677 DELAY(5); 3678 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 3679 } 3680 break; 3681 case IFM_1000_SX: 3682 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3683 BGE_CLRBIT(sc, BGE_MAC_MODE, 3684 BGE_MACMODE_HALF_DUPLEX); 3685 } else { 3686 BGE_SETBIT(sc, BGE_MAC_MODE, 3687 BGE_MACMODE_HALF_DUPLEX); 3688 } 3689 break; 3690 default: 3691 return(EINVAL); 3692 } 3693 } else { 3694 struct mii_data *mii = device_get_softc(sc->bge_miibus); 3695 3696 sc->bge_link_evt++; 3697 sc->bge_link = 0; 3698 if (mii->mii_instance) { 3699 struct mii_softc *miisc; 3700 3701 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3702 mii_phy_reset(miisc); 3703 } 3704 mii_mediachg(mii); 3705 3706 /* 3707 * Force an interrupt so that we will call bge_link_upd 3708 * if needed and clear any pending link state attention. 3709 * Without this we are not getting any further interrupts 3710 * for link state changes and thus will not UP the link and 3711 * not be able to send in bge_start. The only way to get 3712 * things working was to receive a packet and get an RX 3713 * intr. 3714 * 3715 * bge_tick should help for fiber cards and we might not 3716 * need to do this here if BGE_FLAG_TBI is set but as 3717 * we poll for fiber anyway it should not harm. 3718 */ 3719 if (BGE_IS_CRIPPLED(sc)) 3720 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3721 else 3722 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3723 } 3724 return(0); 3725 } 3726 3727 /* 3728 * Report current media status. 
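 * For TBI cards the link and duplex bits are read straight from
 * BGE_MAC_STS and BGE_MAC_MODE; for copper cards the status is taken
 * from the PHY via mii_pollstat().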
3729 */ 3730 static void 3731 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3732 { 3733 struct bge_softc *sc = ifp->if_softc; 3734 3735 if (sc->bge_flags & BGE_FLAG_TBI) { 3736 ifmr->ifm_status = IFM_AVALID; 3737 ifmr->ifm_active = IFM_ETHER; 3738 if (CSR_READ_4(sc, BGE_MAC_STS) & 3739 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3740 ifmr->ifm_status |= IFM_ACTIVE; 3741 } else { 3742 ifmr->ifm_active |= IFM_NONE; 3743 return; 3744 } 3745 3746 ifmr->ifm_active |= IFM_1000_SX; 3747 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3748 ifmr->ifm_active |= IFM_HDX; 3749 else 3750 ifmr->ifm_active |= IFM_FDX; 3751 } else { 3752 struct mii_data *mii = device_get_softc(sc->bge_miibus); 3753 3754 mii_pollstat(mii); 3755 ifmr->ifm_active = mii->mii_media_active; 3756 ifmr->ifm_status = mii->mii_media_status; 3757 } 3758 } 3759 3760 static int 3761 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3762 { 3763 struct bge_softc *sc = ifp->if_softc; 3764 struct ifreq *ifr = (struct ifreq *)data; 3765 int mask, error = 0; 3766 3767 ASSERT_SERIALIZED(ifp->if_serializer); 3768 3769 switch (command) { 3770 case SIOCSIFMTU: 3771 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || 3772 (BGE_IS_JUMBO_CAPABLE(sc) && 3773 ifr->ifr_mtu > BGE_JUMBO_MTU)) { 3774 error = EINVAL; 3775 } else if (ifp->if_mtu != ifr->ifr_mtu) { 3776 ifp->if_mtu = ifr->ifr_mtu; 3777 if (ifp->if_flags & IFF_RUNNING) 3778 bge_init(sc); 3779 } 3780 break; 3781 case SIOCSIFFLAGS: 3782 if (ifp->if_flags & IFF_UP) { 3783 if (ifp->if_flags & IFF_RUNNING) { 3784 mask = ifp->if_flags ^ sc->bge_if_flags; 3785 3786 /* 3787 * If only the state of the PROMISC flag 3788 * changed, then just use the 'set promisc 3789 * mode' command instead of reinitializing 3790 * the entire NIC. Doing a full re-init 3791 * means reloading the firmware and waiting 3792 * for it to start up, which may take a 3793 * second or two. Similarly for ALLMULTI. 3794 */ 3795 if (mask & IFF_PROMISC) 3796 bge_setpromisc(sc); 3797 if (mask & IFF_ALLMULTI) 3798 bge_setmulti(sc); 3799 } else { 3800 bge_init(sc); 3801 } 3802 } else if (ifp->if_flags & IFF_RUNNING) { 3803 bge_stop(sc); 3804 } 3805 sc->bge_if_flags = ifp->if_flags; 3806 break; 3807 case SIOCADDMULTI: 3808 case SIOCDELMULTI: 3809 if (ifp->if_flags & IFF_RUNNING) 3810 bge_setmulti(sc); 3811 break; 3812 case SIOCSIFMEDIA: 3813 case SIOCGIFMEDIA: 3814 if (sc->bge_flags & BGE_FLAG_TBI) { 3815 error = ifmedia_ioctl(ifp, ifr, 3816 &sc->bge_ifmedia, command); 3817 } else { 3818 struct mii_data *mii; 3819 3820 mii = device_get_softc(sc->bge_miibus); 3821 error = ifmedia_ioctl(ifp, ifr, 3822 &mii->mii_media, command); 3823 } 3824 break; 3825 case SIOCSIFCAP: 3826 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3827 if (mask & IFCAP_HWCSUM) { 3828 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 3829 if (IFCAP_HWCSUM & ifp->if_capenable) 3830 ifp->if_hwassist = BGE_CSUM_FEATURES; 3831 else 3832 ifp->if_hwassist = 0; 3833 } 3834 break; 3835 default: 3836 error = ether_ioctl(ifp, command, data); 3837 break; 3838 } 3839 return error; 3840 } 3841 3842 static void 3843 bge_watchdog(struct ifnet *ifp) 3844 { 3845 struct bge_softc *sc = ifp->if_softc; 3846 3847 if_printf(ifp, "watchdog timeout -- resetting\n"); 3848 3849 bge_init(sc); 3850 3851 ifp->if_oerrors++; 3852 3853 if (!ifq_is_empty(&ifp->if_snd)) 3854 if_devstart(ifp); 3855 } 3856 3857 /* 3858 * Stop the adapter and free any mbufs allocated to the 3859 * RX and TX lists. 
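 * The state machines are shut down in receiver, transmitter and then
 * memory-manager order; each bge_stop_block() call clears the enable
 * bit and polls until the block reports that it has actually stopped.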
3860 */ 3861 static void 3862 bge_stop(struct bge_softc *sc) 3863 { 3864 struct ifnet *ifp = &sc->arpcom.ac_if; 3865 3866 ASSERT_SERIALIZED(ifp->if_serializer); 3867 3868 callout_stop(&sc->bge_stat_timer); 3869 3870 /* 3871 * Disable all of the receiver blocks 3872 */ 3873 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3874 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3875 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 3876 if (BGE_IS_5700_FAMILY(sc)) 3877 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 3878 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 3879 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3880 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 3881 3882 /* 3883 * Disable all of the transmit blocks 3884 */ 3885 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3886 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3887 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3888 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 3889 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 3890 if (BGE_IS_5700_FAMILY(sc)) 3891 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 3892 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3893 3894 /* 3895 * Shut down all of the memory managers and related 3896 * state machines. 3897 */ 3898 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 3899 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 3900 if (BGE_IS_5700_FAMILY(sc)) 3901 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3902 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 3903 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 3904 if (!BGE_IS_5705_PLUS(sc)) { 3905 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 3906 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3907 } 3908 3909 /* Disable host interrupts. */ 3910 bge_disable_intr(sc); 3911 3912 /* 3913 * Tell firmware we're shutting down. 3914 */ 3915 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3916 3917 /* Free the RX lists. */ 3918 bge_free_rx_ring_std(sc); 3919 3920 /* Free jumbo RX list. */ 3921 if (BGE_IS_JUMBO_CAPABLE(sc)) 3922 bge_free_rx_ring_jumbo(sc); 3923 3924 /* Free TX buffers. */ 3925 bge_free_tx_ring(sc); 3926 3927 sc->bge_status_tag = 0; 3928 sc->bge_link = 0; 3929 sc->bge_coal_chg = 0; 3930 3931 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 3932 3933 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3934 ifp->if_timer = 0; 3935 } 3936 3937 /* 3938 * Stop all chip I/O so that the kernel's probe routines don't 3939 * get confused by errant DMAs when rebooting. 
3940 */ 3941 static void 3942 bge_shutdown(device_t dev) 3943 { 3944 struct bge_softc *sc = device_get_softc(dev); 3945 struct ifnet *ifp = &sc->arpcom.ac_if; 3946 3947 lwkt_serialize_enter(ifp->if_serializer); 3948 bge_stop(sc); 3949 bge_reset(sc); 3950 lwkt_serialize_exit(ifp->if_serializer); 3951 } 3952 3953 static int 3954 bge_suspend(device_t dev) 3955 { 3956 struct bge_softc *sc = device_get_softc(dev); 3957 struct ifnet *ifp = &sc->arpcom.ac_if; 3958 3959 lwkt_serialize_enter(ifp->if_serializer); 3960 bge_stop(sc); 3961 lwkt_serialize_exit(ifp->if_serializer); 3962 3963 return 0; 3964 } 3965 3966 static int 3967 bge_resume(device_t dev) 3968 { 3969 struct bge_softc *sc = device_get_softc(dev); 3970 struct ifnet *ifp = &sc->arpcom.ac_if; 3971 3972 lwkt_serialize_enter(ifp->if_serializer); 3973 3974 if (ifp->if_flags & IFF_UP) { 3975 bge_init(sc); 3976 3977 if (!ifq_is_empty(&ifp->if_snd)) 3978 if_devstart(ifp); 3979 } 3980 3981 lwkt_serialize_exit(ifp->if_serializer); 3982 3983 return 0; 3984 } 3985 3986 static void 3987 bge_setpromisc(struct bge_softc *sc) 3988 { 3989 struct ifnet *ifp = &sc->arpcom.ac_if; 3990 3991 if (ifp->if_flags & IFF_PROMISC) 3992 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3993 else 3994 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 3995 } 3996 3997 static void 3998 bge_dma_free(struct bge_softc *sc) 3999 { 4000 int i; 4001 4002 /* Destroy RX mbuf DMA stuffs. */ 4003 if (sc->bge_cdata.bge_rx_mtag != NULL) { 4004 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4005 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 4006 sc->bge_cdata.bge_rx_std_dmamap[i]); 4007 } 4008 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 4009 sc->bge_cdata.bge_rx_tmpmap); 4010 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 4011 } 4012 4013 /* Destroy TX mbuf DMA stuffs. */ 4014 if (sc->bge_cdata.bge_tx_mtag != NULL) { 4015 for (i = 0; i < BGE_TX_RING_CNT; i++) { 4016 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 4017 sc->bge_cdata.bge_tx_dmamap[i]); 4018 } 4019 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 4020 } 4021 4022 /* Destroy standard RX ring */ 4023 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag, 4024 sc->bge_cdata.bge_rx_std_ring_map, 4025 sc->bge_ldata.bge_rx_std_ring); 4026 4027 if (BGE_IS_JUMBO_CAPABLE(sc)) 4028 bge_free_jumbo_mem(sc); 4029 4030 /* Destroy RX return ring */ 4031 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag, 4032 sc->bge_cdata.bge_rx_return_ring_map, 4033 sc->bge_ldata.bge_rx_return_ring); 4034 4035 /* Destroy TX ring */ 4036 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag, 4037 sc->bge_cdata.bge_tx_ring_map, 4038 sc->bge_ldata.bge_tx_ring); 4039 4040 /* Destroy status block */ 4041 bge_dma_block_free(sc->bge_cdata.bge_status_tag, 4042 sc->bge_cdata.bge_status_map, 4043 sc->bge_ldata.bge_status_block); 4044 4045 /* Destroy statistics block */ 4046 bge_dma_block_free(sc->bge_cdata.bge_stats_tag, 4047 sc->bge_cdata.bge_stats_map, 4048 sc->bge_ldata.bge_stats); 4049 4050 /* Destroy the parent tag */ 4051 if (sc->bge_cdata.bge_parent_tag != NULL) 4052 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 4053 } 4054 4055 static int 4056 bge_dma_alloc(struct bge_softc *sc) 4057 { 4058 struct ifnet *ifp = &sc->arpcom.ac_if; 4059 int i, error; 4060 bus_addr_t lowaddr; 4061 4062 lowaddr = BUS_SPACE_MAXADDR; 4063 if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT) 4064 lowaddr = BGE_DMA_MAXADDR_40BIT; 4065 4066 /* 4067 * Allocate the parent bus DMA tag appropriate for PCI. 
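 * Passing BGE_DMA_BOUNDARY_4G as the boundary argument to
 * bus_dma_tag_create() below makes busdma split any segment that would
 * otherwise straddle a 4GB multiple, which avoids the erratum described
 * next.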
4068 * 4069 * All of the NetExtreme/NetLink controllers have 4GB boundary 4070 * DMA bug. 4071 * Whenever an address crosses a multiple of the 4GB boundary 4072 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition 4073 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 4074 * state machine will lockup and cause the device to hang. 4075 */ 4076 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, 4077 lowaddr, BUS_SPACE_MAXADDR, 4078 NULL, NULL, 4079 BUS_SPACE_MAXSIZE_32BIT, 0, 4080 BUS_SPACE_MAXSIZE_32BIT, 4081 0, &sc->bge_cdata.bge_parent_tag); 4082 if (error) { 4083 if_printf(ifp, "could not allocate parent dma tag\n"); 4084 return error; 4085 } 4086 4087 /* 4088 * Create DMA tag and maps for RX mbufs. 4089 */ 4090 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 4091 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4092 NULL, NULL, MCLBYTES, 1, MCLBYTES, 4093 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 4094 &sc->bge_cdata.bge_rx_mtag); 4095 if (error) { 4096 if_printf(ifp, "could not allocate RX mbuf dma tag\n"); 4097 return error; 4098 } 4099 4100 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 4101 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap); 4102 if (error) { 4103 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 4104 sc->bge_cdata.bge_rx_mtag = NULL; 4105 return error; 4106 } 4107 4108 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4109 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 4110 BUS_DMA_WAITOK, 4111 &sc->bge_cdata.bge_rx_std_dmamap[i]); 4112 if (error) { 4113 int j; 4114 4115 for (j = 0; j < i; ++j) { 4116 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 4117 sc->bge_cdata.bge_rx_std_dmamap[j]); 4118 } 4119 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 4120 sc->bge_cdata.bge_rx_mtag = NULL; 4121 4122 if_printf(ifp, "could not create DMA map for RX\n"); 4123 return error; 4124 } 4125 } 4126 4127 /* 4128 * Create DMA tag and maps for TX mbufs. 4129 */ 4130 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 4131 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4132 NULL, NULL, 4133 BGE_JUMBO_FRAMELEN, BGE_NSEG_NEW, MCLBYTES, 4134 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 4135 BUS_DMA_ONEBPAGE, 4136 &sc->bge_cdata.bge_tx_mtag); 4137 if (error) { 4138 if_printf(ifp, "could not allocate TX mbuf dma tag\n"); 4139 return error; 4140 } 4141 4142 for (i = 0; i < BGE_TX_RING_CNT; i++) { 4143 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 4144 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 4145 &sc->bge_cdata.bge_tx_dmamap[i]); 4146 if (error) { 4147 int j; 4148 4149 for (j = 0; j < i; ++j) { 4150 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 4151 sc->bge_cdata.bge_tx_dmamap[j]); 4152 } 4153 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 4154 sc->bge_cdata.bge_tx_mtag = NULL; 4155 4156 if_printf(ifp, "could not create DMA map for TX\n"); 4157 return error; 4158 } 4159 } 4160 4161 /* 4162 * Create DMA stuffs for standard RX ring. 4163 */ 4164 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 4165 &sc->bge_cdata.bge_rx_std_ring_tag, 4166 &sc->bge_cdata.bge_rx_std_ring_map, 4167 (void *)&sc->bge_ldata.bge_rx_std_ring, 4168 &sc->bge_ldata.bge_rx_std_ring_paddr); 4169 if (error) { 4170 if_printf(ifp, "could not create std RX ring\n"); 4171 return error; 4172 } 4173 4174 /* 4175 * Create jumbo buffer pool. 4176 */ 4177 if (BGE_IS_JUMBO_CAPABLE(sc)) { 4178 error = bge_alloc_jumbo_mem(sc); 4179 if (error) { 4180 if_printf(ifp, "could not create jumbo buffer pool\n"); 4181 return error; 4182 } 4183 } 4184 4185 /* 4186 * Create DMA stuffs for RX return ring. 
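 * The ring size is derived from bge_return_ring_cnt, which attach code
 * sets per chip class (the 5705-class chips support a smaller return
 * ring than the 5700 family).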
4187 */ 4188 error = bge_dma_block_alloc(sc, 4189 BGE_RX_RTN_RING_SZ(sc->bge_return_ring_cnt), 4190 &sc->bge_cdata.bge_rx_return_ring_tag, 4191 &sc->bge_cdata.bge_rx_return_ring_map, 4192 (void *)&sc->bge_ldata.bge_rx_return_ring, 4193 &sc->bge_ldata.bge_rx_return_ring_paddr); 4194 if (error) { 4195 if_printf(ifp, "could not create RX ret ring\n"); 4196 return error; 4197 } 4198 4199 /* 4200 * Create DMA stuffs for TX ring. 4201 */ 4202 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ, 4203 &sc->bge_cdata.bge_tx_ring_tag, 4204 &sc->bge_cdata.bge_tx_ring_map, 4205 (void *)&sc->bge_ldata.bge_tx_ring, 4206 &sc->bge_ldata.bge_tx_ring_paddr); 4207 if (error) { 4208 if_printf(ifp, "could not create TX ring\n"); 4209 return error; 4210 } 4211 4212 /* 4213 * Create DMA stuffs for status block. 4214 */ 4215 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ, 4216 &sc->bge_cdata.bge_status_tag, 4217 &sc->bge_cdata.bge_status_map, 4218 (void *)&sc->bge_ldata.bge_status_block, 4219 &sc->bge_ldata.bge_status_block_paddr); 4220 if (error) { 4221 if_printf(ifp, "could not create status block\n"); 4222 return error; 4223 } 4224 4225 /* 4226 * Create DMA stuffs for statistics block. 4227 */ 4228 error = bge_dma_block_alloc(sc, BGE_STATS_SZ, 4229 &sc->bge_cdata.bge_stats_tag, 4230 &sc->bge_cdata.bge_stats_map, 4231 (void *)&sc->bge_ldata.bge_stats, 4232 &sc->bge_ldata.bge_stats_paddr); 4233 if (error) { 4234 if_printf(ifp, "could not create stats block\n"); 4235 return error; 4236 } 4237 return 0; 4238 } 4239 4240 static int 4241 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4242 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4243 { 4244 bus_dmamem_t dmem; 4245 int error; 4246 4247 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, 4248 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4249 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4250 if (error) 4251 return error; 4252 4253 *tag = dmem.dmem_tag; 4254 *map = dmem.dmem_map; 4255 *addr = dmem.dmem_addr; 4256 *paddr = dmem.dmem_busaddr; 4257 4258 return 0; 4259 } 4260 4261 static void 4262 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4263 { 4264 if (tag != NULL) { 4265 bus_dmamap_unload(tag, map); 4266 bus_dmamem_free(tag, addr, map); 4267 bus_dma_tag_destroy(tag); 4268 } 4269 } 4270 4271 /* 4272 * Grrr. The link status word in the status block does 4273 * not work correctly on the BCM5700 rev AX and BX chips, 4274 * according to all available information. Hence, we have 4275 * to enable MII interrupts in order to properly obtain 4276 * async link changes. Unfortunately, this also means that 4277 * we have to read the MAC status register to detect link 4278 * changes, thereby adding an additional register access to 4279 * the interrupt handler. 4280 * 4281 * XXX: perhaps link state detection procedure used for 4282 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. 
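 * Consequently bge_bcm5700_link_upd() below ignores the status word,
 * polls the PHY through mii_pollstat(), and then re-arms the PHY
 * interrupt by reading the interrupt status register and rewriting the
 * interrupt mask.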
4283 */ 4284 static void 4285 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused) 4286 { 4287 struct ifnet *ifp = &sc->arpcom.ac_if; 4288 struct mii_data *mii = device_get_softc(sc->bge_miibus); 4289 4290 mii_pollstat(mii); 4291 4292 if (!sc->bge_link && 4293 (mii->mii_media_status & IFM_ACTIVE) && 4294 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4295 sc->bge_link++; 4296 if (bootverbose) 4297 if_printf(ifp, "link UP\n"); 4298 } else if (sc->bge_link && 4299 (!(mii->mii_media_status & IFM_ACTIVE) || 4300 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4301 sc->bge_link = 0; 4302 if (bootverbose) 4303 if_printf(ifp, "link DOWN\n"); 4304 } 4305 4306 /* Clear the interrupt. */ 4307 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); 4308 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4309 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS); 4310 } 4311 4312 static void 4313 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status) 4314 { 4315 struct ifnet *ifp = &sc->arpcom.ac_if; 4316 4317 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4318 4319 /* 4320 * Sometimes PCS encoding errors are detected in 4321 * TBI mode (on fiber NICs), and for some reason 4322 * the chip will signal them as link changes. 4323 * If we get a link change event, but the 'PCS 4324 * encoding error' bit in the MAC status register 4325 * is set, don't bother doing a link check. 4326 * This avoids spurious "gigabit link up" messages 4327 * that sometimes appear on fiber NICs during 4328 * periods of heavy traffic. 4329 */ 4330 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4331 if (!sc->bge_link) { 4332 sc->bge_link++; 4333 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 4334 BGE_CLRBIT(sc, BGE_MAC_MODE, 4335 BGE_MACMODE_TBI_SEND_CFGS); 4336 } 4337 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4338 4339 if (bootverbose) 4340 if_printf(ifp, "link UP\n"); 4341 4342 ifp->if_link_state = LINK_STATE_UP; 4343 if_link_state_change(ifp); 4344 } 4345 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4346 if (sc->bge_link) { 4347 sc->bge_link = 0; 4348 4349 if (bootverbose) 4350 if_printf(ifp, "link DOWN\n"); 4351 4352 ifp->if_link_state = LINK_STATE_DOWN; 4353 if_link_state_change(ifp); 4354 } 4355 } 4356 4357 #undef PCS_ENCODE_ERR 4358 4359 /* Clear the attention. */ 4360 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4361 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4362 BGE_MACSTAT_LINK_CHANGED); 4363 } 4364 4365 static void 4366 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused) 4367 { 4368 struct ifnet *ifp = &sc->arpcom.ac_if; 4369 struct mii_data *mii = device_get_softc(sc->bge_miibus); 4370 4371 mii_pollstat(mii); 4372 bge_miibus_statchg(sc->bge_dev); 4373 4374 if (bootverbose) { 4375 if (sc->bge_link) 4376 if_printf(ifp, "link UP\n"); 4377 else 4378 if_printf(ifp, "link DOWN\n"); 4379 } 4380 4381 /* Clear the attention. 
*/ 4382 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4383 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4384 BGE_MACSTAT_LINK_CHANGED); 4385 } 4386 4387 static void 4388 bge_autopoll_link_upd(struct bge_softc *sc, uint32_t status __unused) 4389 { 4390 struct ifnet *ifp = &sc->arpcom.ac_if; 4391 struct mii_data *mii = device_get_softc(sc->bge_miibus); 4392 4393 mii_pollstat(mii); 4394 4395 if (!sc->bge_link && 4396 (mii->mii_media_status & IFM_ACTIVE) && 4397 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4398 sc->bge_link++; 4399 if (bootverbose) 4400 if_printf(ifp, "link UP\n"); 4401 } else if (sc->bge_link && 4402 (!(mii->mii_media_status & IFM_ACTIVE) || 4403 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4404 sc->bge_link = 0; 4405 if (bootverbose) 4406 if_printf(ifp, "link DOWN\n"); 4407 } 4408 4409 /* Clear the attention. */ 4410 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4411 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4412 BGE_MACSTAT_LINK_CHANGED); 4413 } 4414 4415 static int 4416 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4417 { 4418 struct bge_softc *sc = arg1; 4419 4420 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4421 &sc->bge_rx_coal_ticks, 4422 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX, 4423 BGE_RX_COAL_TICKS_CHG); 4424 } 4425 4426 static int 4427 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4428 { 4429 struct bge_softc *sc = arg1; 4430 4431 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4432 &sc->bge_tx_coal_ticks, 4433 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX, 4434 BGE_TX_COAL_TICKS_CHG); 4435 } 4436 4437 static int 4438 bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4439 { 4440 struct bge_softc *sc = arg1; 4441 4442 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4443 &sc->bge_rx_coal_bds, 4444 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX, 4445 BGE_RX_COAL_BDS_CHG); 4446 } 4447 4448 static int 4449 bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4450 { 4451 struct bge_softc *sc = arg1; 4452 4453 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4454 &sc->bge_tx_coal_bds, 4455 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX, 4456 BGE_TX_COAL_BDS_CHG); 4457 } 4458 4459 static int 4460 bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS) 4461 { 4462 struct bge_softc *sc = arg1; 4463 4464 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4465 &sc->bge_rx_coal_ticks_int, 4466 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX, 4467 BGE_RX_COAL_TICKS_INT_CHG); 4468 } 4469 4470 static int 4471 bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS) 4472 { 4473 struct bge_softc *sc = arg1; 4474 4475 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4476 &sc->bge_tx_coal_ticks_int, 4477 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX, 4478 BGE_TX_COAL_TICKS_INT_CHG); 4479 } 4480 4481 static int 4482 bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4483 { 4484 struct bge_softc *sc = arg1; 4485 4486 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4487 &sc->bge_rx_coal_bds_int, 4488 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX, 4489 BGE_RX_COAL_BDS_INT_CHG); 4490 } 4491 4492 static int 4493 bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4494 { 4495 struct bge_softc *sc = arg1; 4496 4497 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4498 &sc->bge_tx_coal_bds_int, 4499 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX, 4500 BGE_TX_COAL_BDS_INT_CHG); 4501 } 4502 4503 static int 4504 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4505 int coal_min, int coal_max, uint32_t coal_chg_mask) 4506 { 4507 struct bge_softc *sc = arg1; 4508 struct ifnet *ifp = 
&sc->arpcom.ac_if; 4509 int error = 0, v; 4510 4511 lwkt_serialize_enter(ifp->if_serializer); 4512 4513 v = *coal; 4514 error = sysctl_handle_int(oidp, &v, 0, req); 4515 if (!error && req->newptr != NULL) { 4516 if (v < coal_min || v > coal_max) { 4517 error = EINVAL; 4518 } else { 4519 *coal = v; 4520 sc->bge_coal_chg |= coal_chg_mask; 4521 } 4522 } 4523 4524 lwkt_serialize_exit(ifp->if_serializer); 4525 return error; 4526 } 4527 4528 static void 4529 bge_coal_change(struct bge_softc *sc) 4530 { 4531 struct ifnet *ifp = &sc->arpcom.ac_if; 4532 uint32_t val; 4533 4534 ASSERT_SERIALIZED(ifp->if_serializer); 4535 4536 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) { 4537 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 4538 sc->bge_rx_coal_ticks); 4539 DELAY(10); 4540 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 4541 4542 if (bootverbose) { 4543 if_printf(ifp, "rx_coal_ticks -> %u\n", 4544 sc->bge_rx_coal_ticks); 4545 } 4546 } 4547 4548 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) { 4549 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 4550 sc->bge_tx_coal_ticks); 4551 DELAY(10); 4552 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS); 4553 4554 if (bootverbose) { 4555 if_printf(ifp, "tx_coal_ticks -> %u\n", 4556 sc->bge_tx_coal_ticks); 4557 } 4558 } 4559 4560 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_CHG) { 4561 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 4562 sc->bge_rx_coal_bds); 4563 DELAY(10); 4564 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 4565 4566 if (bootverbose) { 4567 if_printf(ifp, "rx_coal_bds -> %u\n", 4568 sc->bge_rx_coal_bds); 4569 } 4570 } 4571 4572 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_CHG) { 4573 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 4574 sc->bge_tx_coal_bds); 4575 DELAY(10); 4576 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS); 4577 4578 if (bootverbose) { 4579 if_printf(ifp, "tx_max_coal_bds -> %u\n", 4580 sc->bge_tx_coal_bds); 4581 } 4582 } 4583 4584 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_INT_CHG) { 4585 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 4586 sc->bge_rx_coal_ticks_int); 4587 DELAY(10); 4588 val = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS_INT); 4589 4590 if (bootverbose) { 4591 if_printf(ifp, "rx_coal_ticks_int -> %u\n", 4592 sc->bge_rx_coal_ticks_int); 4593 } 4594 } 4595 4596 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_INT_CHG) { 4597 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 4598 sc->bge_tx_coal_ticks_int); 4599 DELAY(10); 4600 val = CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS_INT); 4601 4602 if (bootverbose) { 4603 if_printf(ifp, "tx_coal_ticks_int -> %u\n", 4604 sc->bge_tx_coal_ticks_int); 4605 } 4606 } 4607 4608 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_INT_CHG) { 4609 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 4610 sc->bge_rx_coal_bds_int); 4611 DELAY(10); 4612 val = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT); 4613 4614 if (bootverbose) { 4615 if_printf(ifp, "rx_coal_bds_int -> %u\n", 4616 sc->bge_rx_coal_bds_int); 4617 } 4618 } 4619 4620 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_INT_CHG) { 4621 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 4622 sc->bge_tx_coal_bds_int); 4623 DELAY(10); 4624 val = CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT); 4625 4626 if (bootverbose) { 4627 if_printf(ifp, "tx_coal_bds_int -> %u\n", 4628 sc->bge_tx_coal_bds_int); 4629 } 4630 } 4631 4632 sc->bge_coal_chg = 0; 4633 } 4634 4635 static void 4636 bge_enable_intr(struct bge_softc *sc) 4637 { 4638 struct ifnet *ifp = &sc->arpcom.ac_if; 4639 4640 lwkt_serialize_handler_enable(ifp->if_serializer); 4641 4642 /* 4643 * Enable interrupt. 
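 * Writing the current status tag to interrupt mailbox 0 acknowledges
 * everything processed so far and unmasks further interrupt delivery;
 * bge_disable_intr() masks interrupts again by writing 1 to the same
 * mailbox.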
 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
	if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) {
		/* XXX Linux driver */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24);
	}

	/*
	 * Unmask the interrupt when we stop polling.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Trigger another interrupt, since the above write
	 * to interrupt mailbox0 may acknowledge a pending
	 * interrupt.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
}

static void
bge_disable_intr(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Mask the interrupt when we start polling.
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_MASK_PCI_INTR, 4);

	/*
	 * Acknowledge a possibly asserted interrupt.
	 */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	lwkt_serialize_handler_disable(ifp->if_serializer);
}

static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;
	int ret = 1;

	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		ret = 0;
	}
	return ret;
}

static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN);
}

static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{
	if (sc->bge_flags & BGE_FLAG_NO_EEPROM)
		return 1;

	return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN);
}

static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}

/*
 * NOTE: 'm' is not freed upon failure
 */
struct mbuf *
bge_defrag_shortdma(struct mbuf *m)
{
	struct mbuf *n;
	int found;

	/*
	 * If the device receives two back-to-back send BDs with less than
	 * or equal to 8 total bytes, the device may hang.  The two
	 * back-to-back send BDs must be in the same frame for this failure
	 * to occur.  Scan the mbuf chain and see whether two back-to-back
	 * send BDs are there.  If this is the case, allocate a new mbuf
	 * and copy the frame to work around the silicon bug.
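	 *
	 * The scan below flags the chain once it sees two consecutive
	 * mbufs that are each shorter than 8 bytes; only then is the
	 * relatively expensive m_defrag() copy performed.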
4760 */ 4761 for (n = m, found = 0; n != NULL; n = n->m_next) { 4762 if (n->m_len < 8) { 4763 found++; 4764 if (found > 1) 4765 break; 4766 continue; 4767 } 4768 found = 0; 4769 } 4770 4771 if (found > 1) 4772 n = m_defrag(m, MB_DONTWAIT); 4773 else 4774 n = m; 4775 return n; 4776 } 4777 4778 static void 4779 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit) 4780 { 4781 int i; 4782 4783 BGE_CLRBIT(sc, reg, bit); 4784 for (i = 0; i < BGE_TIMEOUT; i++) { 4785 if ((CSR_READ_4(sc, reg) & bit) == 0) 4786 return; 4787 DELAY(100); 4788 } 4789 } 4790 4791 static void 4792 bge_link_poll(struct bge_softc *sc) 4793 { 4794 uint32_t status; 4795 4796 status = CSR_READ_4(sc, BGE_MAC_STS); 4797 if ((status & sc->bge_link_chg) || sc->bge_link_evt) { 4798 sc->bge_link_evt = 0; 4799 sc->bge_link_upd(sc, status); 4800 } 4801 } 4802 4803 static void 4804 bge_enable_msi(struct bge_softc *sc) 4805 { 4806 uint32_t msi_mode; 4807 4808 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE); 4809 msi_mode |= BGE_MSIMODE_ENABLE; 4810 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 4811 /* 4812 * According to all of the datasheets that are publicly 4813 * available, bit 5 of the MSI_MODE is defined to be 4814 * "MSI FIFO Underrun Attn" for BCM5755+ and BCM5906, on 4815 * which "oneshot MSI" is enabled. However, it is always 4816 * safe to clear it here. 4817 */ 4818 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE; 4819 } 4820 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode); 4821 } 4822