1 /* 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2001 4 * Bill Paul <wpaul@windriver.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. All advertising materials mentioning features or use of this software 15 * must display the following acknowledgement: 16 * This product includes software developed by Bill Paul. 17 * 4. Neither the name of the author nor the names of any co-contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.39 2005/07/03 03:41:18 silby Exp $ 34 */ 35 36 /* 37 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. 38 * 39 * Written by Bill Paul <wpaul@windriver.com> 40 * Senior Engineer, Wind River Systems 41 */ 42 43 /* 44 * The Broadcom BCM5700 is based on technology originally developed by 45 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 46 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 47 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 48 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 49 * frames, highly configurable RX filtering, and 16 RX and TX queues 50 * (which, along with RX filter rules, can be used for QOS applications). 51 * Other features, such as TCP segmentation, may be available as part 52 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 53 * firmware images can be stored in hardware and need not be compiled 54 * into the driver. 55 * 56 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 57 * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 58 * 59 * The BCM5701 is a single-chip solution incorporating both the BCM5700 60 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 61 * does not support external SSRAM. 62 * 63 * Broadcom also produces a variation of the BCM5700 under the "Altima" 64 * brand name, which is functionally similar but lacks PCI-X support. 65 * 66 * Without external SSRAM, you can only have at most 4 TX rings, 67 * and the use of the mini RX ring is disabled. This seems to imply 68 * that these features are simply not available on the BCM5701. As a 69 * result, this driver does not implement any support for the mini RX 70 * ring. 71 */ 72 73 #include "opt_ifpoll.h" 74 75 #include <sys/param.h> 76 #include <sys/bus.h> 77 #include <sys/endian.h> 78 #include <sys/kernel.h> 79 #include <sys/ktr.h> 80 #include <sys/interrupt.h> 81 #include <sys/mbuf.h> 82 #include <sys/malloc.h> 83 #include <sys/queue.h> 84 #include <sys/rman.h> 85 #include <sys/serialize.h> 86 #include <sys/socket.h> 87 #include <sys/sockio.h> 88 #include <sys/sysctl.h> 89 90 #include <netinet/ip.h> 91 #include <netinet/tcp.h> 92 93 #include <net/bpf.h> 94 #include <net/ethernet.h> 95 #include <net/if.h> 96 #include <net/if_arp.h> 97 #include <net/if_dl.h> 98 #include <net/if_media.h> 99 #include <net/if_poll.h> 100 #include <net/if_types.h> 101 #include <net/ifq_var.h> 102 #include <net/vlan/if_vlan_var.h> 103 #include <net/vlan/if_vlan_ether.h> 104 105 #include <dev/netif/mii_layer/mii.h> 106 #include <dev/netif/mii_layer/miivar.h> 107 #include <dev/netif/mii_layer/brgphyreg.h> 108 109 #include "pcidevs.h" 110 #include <bus/pci/pcireg.h> 111 #include <bus/pci/pcivar.h> 112 113 #include <dev/netif/bge/if_bgereg.h> 114 #include <dev/netif/bge/if_bgevar.h> 115 116 /* "device miibus" required. See GENERIC if you get errors here. */ 117 #include "miibus_if.h" 118 119 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) 120 121 #define BGE_RESET_SHUTDOWN 0 122 #define BGE_RESET_START 1 123 #define BGE_RESET_SUSPEND 2 124 125 static const struct bge_type { 126 uint16_t bge_vid; 127 uint16_t bge_did; 128 char *bge_name; 129 } bge_devs[] = { 130 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996, 131 "3COM 3C996 Gigabit Ethernet" }, 132 133 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700, 134 "Alteon BCM5700 Gigabit Ethernet" }, 135 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701, 136 "Alteon BCM5701 Gigabit Ethernet" }, 137 138 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000, 139 "Altima AC1000 Gigabit Ethernet" }, 140 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001, 141 "Altima AC1002 Gigabit Ethernet" }, 142 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100, 143 "Altima AC9100 Gigabit Ethernet" }, 144 145 { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701, 146 "Apple BCM5701 Gigabit Ethernet" }, 147 148 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700, 149 "Broadcom BCM5700 Gigabit Ethernet" }, 150 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701, 151 "Broadcom BCM5701 Gigabit Ethernet" }, 152 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702, 153 "Broadcom BCM5702 Gigabit Ethernet" }, 154 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X, 155 "Broadcom BCM5702X Gigabit Ethernet" }, 156 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT, 157 "Broadcom BCM5702 Gigabit Ethernet" }, 158 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703, 159 "Broadcom BCM5703 Gigabit Ethernet" }, 160 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X, 161 "Broadcom BCM5703X Gigabit Ethernet" }, 162 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703A3, 163 "Broadcom BCM5703 Gigabit Ethernet" }, 164 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C, 165 "Broadcom BCM5704C Dual Gigabit Ethernet" }, 166 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S, 167 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 168 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT, 169 "Broadcom BCM5704S Dual Gigabit Ethernet" }, 170 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705, 171 "Broadcom BCM5705 Gigabit Ethernet" }, 172 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F, 173 "Broadcom BCM5705F Gigabit Ethernet" }, 174 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K, 175 "Broadcom BCM5705K Gigabit Ethernet" }, 176 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M, 177 "Broadcom BCM5705M Gigabit Ethernet" }, 178 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 179 "Broadcom BCM5705M Gigabit Ethernet" }, 180 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714, 181 "Broadcom BCM5714C Gigabit Ethernet" }, 182 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S, 183 "Broadcom BCM5714S Gigabit Ethernet" }, 184 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715, 185 "Broadcom BCM5715 Gigabit Ethernet" }, 186 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S, 187 "Broadcom BCM5715S Gigabit Ethernet" }, 188 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720, 189 "Broadcom BCM5720 Gigabit Ethernet" }, 190 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721, 191 "Broadcom BCM5721 Gigabit Ethernet" }, 192 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722, 193 "Broadcom BCM5722 Gigabit Ethernet" }, 194 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723, 195 "Broadcom BCM5723 Gigabit Ethernet" }, 196 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750, 197 "Broadcom BCM5750 Gigabit Ethernet" }, 198 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M, 199 "Broadcom BCM5750M Gigabit Ethernet" }, 200 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751, 201 "Broadcom BCM5751 Gigabit Ethernet" }, 202 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F, 203 "Broadcom BCM5751F Gigabit Ethernet" }, 204 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M, 205 "Broadcom BCM5751M Gigabit Ethernet" }, 206 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752, 207 "Broadcom BCM5752 Gigabit Ethernet" }, 208 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M, 209 "Broadcom BCM5752M Gigabit Ethernet" }, 210 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753, 211 "Broadcom BCM5753 Gigabit Ethernet" }, 212 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F, 213 "Broadcom BCM5753F Gigabit Ethernet" }, 214 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M, 215 "Broadcom BCM5753M Gigabit Ethernet" }, 216 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754, 217 "Broadcom BCM5754 Gigabit Ethernet" }, 218 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M, 219 "Broadcom BCM5754M Gigabit Ethernet" }, 220 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755, 221 "Broadcom BCM5755 Gigabit Ethernet" }, 222 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M, 223 "Broadcom BCM5755M Gigabit Ethernet" }, 224 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756, 225 "Broadcom BCM5756 Gigabit Ethernet" }, 226 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761, 227 "Broadcom BCM5761 Gigabit Ethernet" }, 228 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E, 229 "Broadcom BCM5761E Gigabit Ethernet" }, 230 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S, 231 "Broadcom BCM5761S Gigabit Ethernet" }, 232 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE, 233 "Broadcom BCM5761SE Gigabit Ethernet" }, 234 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764, 235 "Broadcom BCM5764 Gigabit Ethernet" }, 236 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780, 237 "Broadcom BCM5780 Gigabit Ethernet" }, 238 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S, 239 "Broadcom BCM5780S Gigabit Ethernet" }, 240 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781, 241 "Broadcom BCM5781 Gigabit Ethernet" }, 242 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782, 243 "Broadcom BCM5782 Gigabit Ethernet" }, 244 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784, 245 "Broadcom BCM5784 Gigabit Ethernet" }, 246 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F, 247 "Broadcom BCM5785F Gigabit Ethernet" }, 248 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G, 249 "Broadcom BCM5785G Gigabit Ethernet" }, 250 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786, 251 "Broadcom BCM5786 Gigabit Ethernet" }, 252 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787, 253 "Broadcom BCM5787 Gigabit Ethernet" }, 254 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F, 255 "Broadcom BCM5787F Gigabit Ethernet" }, 256 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M, 257 "Broadcom BCM5787M Gigabit Ethernet" }, 258 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788, 259 "Broadcom BCM5788 Gigabit Ethernet" }, 260 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789, 261 "Broadcom BCM5789 Gigabit Ethernet" }, 262 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901, 263 "Broadcom BCM5901 Fast Ethernet" }, 264 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2, 265 "Broadcom BCM5901A2 Fast Ethernet" }, 266 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M, 267 "Broadcom BCM5903M Fast Ethernet" }, 268 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906, 269 "Broadcom BCM5906 Fast Ethernet"}, 270 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M, 271 "Broadcom BCM5906M Fast Ethernet"}, 272 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760, 273 "Broadcom BCM57760 Gigabit Ethernet"}, 274 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780, 275 "Broadcom BCM57780 Gigabit Ethernet"}, 276 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788, 277 "Broadcom BCM57788 Gigabit Ethernet"}, 278 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790, 279 "Broadcom BCM57790 Gigabit Ethernet"}, 280 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 281 "SysKonnect Gigabit Ethernet" }, 282 283 { 0, 0, NULL } 284 }; 285 286 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 287 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 288 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 289 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 290 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 291 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) 292 #define BGE_IS_5788(sc) ((sc)->bge_flags & BGE_FLAG_5788) 293 294 #define BGE_IS_CRIPPLED(sc) \ 295 (BGE_IS_5788((sc)) || (sc)->bge_asicrev == BGE_ASICREV_BCM5700) 296 297 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 298 299 static int bge_probe(device_t); 300 static int bge_attach(device_t); 301 static int bge_detach(device_t); 302 static void bge_txeof(struct bge_softc *, uint16_t); 303 static void bge_rxeof(struct bge_softc *, uint16_t, int); 304 305 static void bge_tick(void *); 306 static void bge_stats_update(struct bge_softc *); 307 static void bge_stats_update_regs(struct bge_softc *); 308 static struct mbuf * 309 bge_defrag_shortdma(struct mbuf *); 310 static int bge_encap(struct bge_softc *, struct mbuf **, 311 uint32_t *, int *); 312 static void bge_xmit(struct bge_softc *, uint32_t); 313 static int bge_setup_tso(struct bge_softc *, struct mbuf **, 314 uint16_t *, uint16_t *); 315 316 #ifdef IFPOLL_ENABLE 317 static void bge_npoll(struct ifnet *, struct ifpoll_info *); 318 static void bge_npoll_compat(struct ifnet *, void *, int ); 319 #endif 320 static void bge_intr_crippled(void *); 321 static void bge_intr_legacy(void *); 322 static void bge_msi(void *); 323 static void bge_msi_oneshot(void *); 324 static void bge_intr(struct bge_softc *); 325 static void bge_enable_intr(struct bge_softc *); 326 static void bge_disable_intr(struct bge_softc *); 327 static void bge_start(struct ifnet *, struct ifaltq_subque *); 328 static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 329 static void bge_init(void *); 330 static void bge_stop(struct bge_softc *); 331 static void bge_watchdog(struct ifnet *); 332 static void bge_shutdown(device_t); 333 static int bge_suspend(device_t); 334 static int bge_resume(device_t); 335 static int bge_ifmedia_upd(struct ifnet *); 336 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 337 338 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 339 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 340 341 static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *); 342 static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t); 343 344 static void bge_setmulti(struct bge_softc *); 345 static void bge_setpromisc(struct bge_softc *); 346 static void bge_enable_msi(struct bge_softc *sc); 347 348 static int bge_alloc_jumbo_mem(struct bge_softc *); 349 static void bge_free_jumbo_mem(struct bge_softc *); 350 static struct bge_jslot 351 *bge_jalloc(struct bge_softc *); 352 static void bge_jfree(void *); 353 static void bge_jref(void *); 354 static int bge_newbuf_std(struct bge_softc *, int, int); 355 static int bge_newbuf_jumbo(struct bge_softc *, int, int); 356 static void bge_setup_rxdesc_std(struct bge_softc *, int); 357 static void bge_setup_rxdesc_jumbo(struct bge_softc *, int); 358 static int bge_init_rx_ring_std(struct bge_softc *); 359 static void bge_free_rx_ring_std(struct bge_softc *); 360 static int bge_init_rx_ring_jumbo(struct bge_softc *); 361 static void bge_free_rx_ring_jumbo(struct bge_softc *); 362 static void bge_free_tx_ring(struct bge_softc *); 363 static int bge_init_tx_ring(struct bge_softc *); 364 365 static int bge_chipinit(struct bge_softc *); 366 static int bge_blockinit(struct bge_softc *); 367 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); 368 369 static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t); 370 static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t); 371 #ifdef notdef 372 static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t); 373 #endif 374 static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t); 375 static void bge_writemem_direct(struct bge_softc *, uint32_t, uint32_t); 376 static void bge_writembx(struct bge_softc *, int, int); 377 378 static int bge_miibus_readreg(device_t, int, int); 379 static int bge_miibus_writereg(device_t, int, int, int); 380 static void bge_miibus_statchg(device_t); 381 static void bge_bcm5700_link_upd(struct bge_softc *, uint32_t); 382 static void bge_tbi_link_upd(struct bge_softc *, uint32_t); 383 static void bge_copper_link_upd(struct bge_softc *, uint32_t); 384 static void bge_autopoll_link_upd(struct bge_softc *, uint32_t); 385 static void bge_link_poll(struct bge_softc *); 386 387 static void bge_reset(struct bge_softc *); 388 389 static int bge_dma_alloc(struct bge_softc *); 390 static void bge_dma_free(struct bge_softc *); 391 static int bge_dma_block_alloc(struct bge_softc *, bus_size_t, 392 bus_dma_tag_t *, bus_dmamap_t *, 393 void **, bus_addr_t *); 394 static void bge_dma_block_free(bus_dma_tag_t, bus_dmamap_t, void *); 395 396 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 397 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 398 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 399 static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 400 401 static void bge_coal_change(struct bge_softc *); 402 static int bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS); 403 static int bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS); 404 static int bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS); 405 static int bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS); 406 static int bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS); 407 static int bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS); 408 static int bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS); 409 static int bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS); 410 static int bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *, 411 int, int, uint32_t); 412 413 static void bge_sig_post_reset(struct bge_softc *, int); 414 static void bge_sig_legacy(struct bge_softc *, int); 415 static void bge_sig_pre_reset(struct bge_softc *, int); 416 static void bge_stop_fw(struct bge_softc *); 417 static void bge_asf_driver_up(struct bge_softc *); 418 419 /* 420 * Set following tunable to 1 for some IBM blade servers with the DNLK 421 * switch module. Auto negotiation is broken for those configurations. 422 */ 423 static int bge_fake_autoneg = 0; 424 TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg); 425 426 static int bge_msi_enable = 1; 427 TUNABLE_INT("hw.bge.msi.enable", &bge_msi_enable); 428 429 static int bge_allow_asf = 1; 430 TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); 431 432 #if !defined(KTR_IF_BGE) 433 #define KTR_IF_BGE KTR_ALL 434 #endif 435 KTR_INFO_MASTER(if_bge); 436 KTR_INFO(KTR_IF_BGE, if_bge, intr, 0, "intr"); 437 KTR_INFO(KTR_IF_BGE, if_bge, rx_pkt, 1, "rx_pkt"); 438 KTR_INFO(KTR_IF_BGE, if_bge, tx_pkt, 2, "tx_pkt"); 439 #define logif(name) KTR_LOG(if_bge_ ## name) 440 441 static device_method_t bge_methods[] = { 442 /* Device interface */ 443 DEVMETHOD(device_probe, bge_probe), 444 DEVMETHOD(device_attach, bge_attach), 445 DEVMETHOD(device_detach, bge_detach), 446 DEVMETHOD(device_shutdown, bge_shutdown), 447 DEVMETHOD(device_suspend, bge_suspend), 448 DEVMETHOD(device_resume, bge_resume), 449 450 /* bus interface */ 451 DEVMETHOD(bus_print_child, bus_generic_print_child), 452 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 453 454 /* MII interface */ 455 DEVMETHOD(miibus_readreg, bge_miibus_readreg), 456 DEVMETHOD(miibus_writereg, bge_miibus_writereg), 457 DEVMETHOD(miibus_statchg, bge_miibus_statchg), 458 459 DEVMETHOD_END 460 }; 461 462 static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc)); 463 static devclass_t bge_devclass; 464 465 DECLARE_DUMMY_MODULE(if_bge); 466 DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, NULL, NULL); 467 DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, NULL, NULL); 468 469 static uint32_t 470 bge_readmem_ind(struct bge_softc *sc, uint32_t off) 471 { 472 device_t dev = sc->bge_dev; 473 uint32_t val; 474 475 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 476 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 477 return 0; 478 479 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 480 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 481 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 482 return (val); 483 } 484 485 static void 486 bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val) 487 { 488 device_t dev = sc->bge_dev; 489 490 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 491 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 492 return; 493 494 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 495 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 496 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 497 } 498 499 #ifdef notdef 500 static uint32_t 501 bge_readreg_ind(struct bge_softc *sc, uin32_t off) 502 { 503 device_t dev = sc->bge_dev; 504 505 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 506 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 507 } 508 #endif 509 510 static void 511 bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val) 512 { 513 device_t dev = sc->bge_dev; 514 515 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 516 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 517 } 518 519 static void 520 bge_writemem_direct(struct bge_softc *sc, uint32_t off, uint32_t val) 521 { 522 CSR_WRITE_4(sc, off, val); 523 } 524 525 static void 526 bge_writembx(struct bge_softc *sc, int off, int val) 527 { 528 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 529 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 530 531 CSR_WRITE_4(sc, off, val); 532 if (sc->bge_mbox_reorder) 533 CSR_READ_4(sc, off); 534 } 535 536 static uint8_t 537 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 538 { 539 uint32_t access, byte = 0; 540 int i; 541 542 /* Lock. */ 543 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 544 for (i = 0; i < 8000; i++) { 545 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 546 break; 547 DELAY(20); 548 } 549 if (i == 8000) 550 return (1); 551 552 /* Enable access. */ 553 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 554 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 555 556 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 557 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 558 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 559 DELAY(10); 560 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 561 DELAY(10); 562 break; 563 } 564 } 565 566 if (i == BGE_TIMEOUT * 10) { 567 if_printf(&sc->arpcom.ac_if, "nvram read timed out\n"); 568 return (1); 569 } 570 571 /* Get result. */ 572 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 573 574 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 575 576 /* Disable access. */ 577 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 578 579 /* Unlock. */ 580 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 581 CSR_READ_4(sc, BGE_NVRAM_SWARB); 582 583 return (0); 584 } 585 586 /* 587 * Read a sequence of bytes from NVRAM. 588 */ 589 static int 590 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) 591 { 592 int err = 0, i; 593 uint8_t byte = 0; 594 595 if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 596 return (1); 597 598 for (i = 0; i < cnt; i++) { 599 err = bge_nvram_getbyte(sc, off + i, &byte); 600 if (err) 601 break; 602 *(dest + i) = byte; 603 } 604 605 return (err ? 1 : 0); 606 } 607 608 /* 609 * Read a byte of data stored in the EEPROM at address 'addr.' The 610 * BCM570x supports both the traditional bitbang interface and an 611 * auto access interface for reading the EEPROM. We use the auto 612 * access method. 613 */ 614 static uint8_t 615 bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest) 616 { 617 int i; 618 uint32_t byte = 0; 619 620 /* 621 * Enable use of auto EEPROM access so we can avoid 622 * having to use the bitbang method. 623 */ 624 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 625 626 /* Reset the EEPROM, load the clock period. */ 627 CSR_WRITE_4(sc, BGE_EE_ADDR, 628 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 629 DELAY(20); 630 631 /* Issue the read EEPROM command. */ 632 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 633 634 /* Wait for completion */ 635 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 636 DELAY(10); 637 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 638 break; 639 } 640 641 if (i == BGE_TIMEOUT) { 642 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n"); 643 return(1); 644 } 645 646 /* Get result. */ 647 byte = CSR_READ_4(sc, BGE_EE_DATA); 648 649 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 650 651 return(0); 652 } 653 654 /* 655 * Read a sequence of bytes from the EEPROM. 656 */ 657 static int 658 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len) 659 { 660 size_t i; 661 int err; 662 uint8_t byte; 663 664 for (byte = 0, err = 0, i = 0; i < len; i++) { 665 err = bge_eeprom_getbyte(sc, off + i, &byte); 666 if (err) 667 break; 668 *(dest + i) = byte; 669 } 670 671 return(err ? 1 : 0); 672 } 673 674 static int 675 bge_miibus_readreg(device_t dev, int phy, int reg) 676 { 677 struct bge_softc *sc = device_get_softc(dev); 678 uint32_t val; 679 int i; 680 681 KASSERT(phy == sc->bge_phyno, 682 ("invalid phyno %d, should be %d", phy, sc->bge_phyno)); 683 684 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 685 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 686 CSR_WRITE_4(sc, BGE_MI_MODE, 687 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); 688 DELAY(80); 689 } 690 691 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 692 BGE_MIPHY(phy) | BGE_MIREG(reg)); 693 694 /* Poll for the PHY register access to complete. */ 695 for (i = 0; i < BGE_TIMEOUT; i++) { 696 DELAY(10); 697 val = CSR_READ_4(sc, BGE_MI_COMM); 698 if ((val & BGE_MICOMM_BUSY) == 0) { 699 DELAY(5); 700 val = CSR_READ_4(sc, BGE_MI_COMM); 701 break; 702 } 703 } 704 if (i == BGE_TIMEOUT) { 705 if_printf(&sc->arpcom.ac_if, "PHY read timed out " 706 "(phy %d, reg %d, val 0x%08x)\n", phy, reg, val); 707 val = 0; 708 } 709 710 /* Restore the autopoll bit if necessary. */ 711 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 712 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 713 DELAY(80); 714 } 715 716 if (val & BGE_MICOMM_READFAIL) 717 return 0; 718 719 return (val & 0xFFFF); 720 } 721 722 static int 723 bge_miibus_writereg(device_t dev, int phy, int reg, int val) 724 { 725 struct bge_softc *sc = device_get_softc(dev); 726 int i; 727 728 KASSERT(phy == sc->bge_phyno, 729 ("invalid phyno %d, should be %d", phy, sc->bge_phyno)); 730 731 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 732 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) 733 return 0; 734 735 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ 736 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 737 CSR_WRITE_4(sc, BGE_MI_MODE, 738 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); 739 DELAY(80); 740 } 741 742 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 743 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 744 745 for (i = 0; i < BGE_TIMEOUT; i++) { 746 DELAY(10); 747 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 748 DELAY(5); 749 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 750 break; 751 } 752 } 753 if (i == BGE_TIMEOUT) { 754 if_printf(&sc->arpcom.ac_if, "PHY write timed out " 755 "(phy %d, reg %d, val %d)\n", phy, reg, val); 756 } 757 758 /* Restore the autopoll bit if necessary. */ 759 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 760 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 761 DELAY(80); 762 } 763 764 return 0; 765 } 766 767 static void 768 bge_miibus_statchg(device_t dev) 769 { 770 struct bge_softc *sc; 771 struct mii_data *mii; 772 uint32_t mac_mode; 773 774 sc = device_get_softc(dev); 775 if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) 776 return; 777 778 mii = device_get_softc(sc->bge_miibus); 779 780 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 781 (IFM_ACTIVE | IFM_AVALID)) { 782 switch (IFM_SUBTYPE(mii->mii_media_active)) { 783 case IFM_10_T: 784 case IFM_100_TX: 785 sc->bge_link = 1; 786 break; 787 case IFM_1000_T: 788 case IFM_1000_SX: 789 case IFM_2500_SX: 790 if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 791 sc->bge_link = 1; 792 else 793 sc->bge_link = 0; 794 break; 795 default: 796 sc->bge_link = 0; 797 break; 798 } 799 } else { 800 sc->bge_link = 0; 801 } 802 if (sc->bge_link == 0) 803 return; 804 805 /* 806 * APE firmware touches these registers to keep the MAC 807 * connected to the outside world. Try to keep the 808 * accesses atomic. 809 */ 810 811 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 812 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 813 814 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 815 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 816 mac_mode |= BGE_PORTMODE_GMII; 817 else 818 mac_mode |= BGE_PORTMODE_MII; 819 820 if ((mii->mii_media_active & IFM_GMASK) != IFM_FDX) 821 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 822 823 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); 824 DELAY(40); 825 } 826 827 /* 828 * Memory management for jumbo frames. 829 */ 830 static int 831 bge_alloc_jumbo_mem(struct bge_softc *sc) 832 { 833 struct ifnet *ifp = &sc->arpcom.ac_if; 834 struct bge_jslot *entry; 835 uint8_t *ptr; 836 bus_addr_t paddr; 837 int i, error; 838 839 /* 840 * Create tag for jumbo mbufs. 841 * This is really a bit of a kludge. We allocate a special 842 * jumbo buffer pool which (thanks to the way our DMA 843 * memory allocation works) will consist of contiguous 844 * pages. This means that even though a jumbo buffer might 845 * be larger than a page size, we don't really need to 846 * map it into more than one DMA segment. However, the 847 * default mbuf tag will result in multi-segment mappings, 848 * so we have to create a special jumbo mbuf tag that 849 * lets us get away with mapping the jumbo buffers as 850 * a single segment. I think eventually the driver should 851 * be changed so that it uses ordinary mbufs and cluster 852 * buffers, i.e. jumbo frames can span multiple DMA 853 * descriptors. But that's a project for another day. 854 */ 855 856 /* 857 * Create DMA stuffs for jumbo RX ring. 858 */ 859 error = bge_dma_block_alloc(sc, BGE_JUMBO_RX_RING_SZ, 860 &sc->bge_cdata.bge_rx_jumbo_ring_tag, 861 &sc->bge_cdata.bge_rx_jumbo_ring_map, 862 (void *)&sc->bge_ldata.bge_rx_jumbo_ring, 863 &sc->bge_ldata.bge_rx_jumbo_ring_paddr); 864 if (error) { 865 if_printf(ifp, "could not create jumbo RX ring\n"); 866 return error; 867 } 868 869 /* 870 * Create DMA stuffs for jumbo buffer block. 871 */ 872 error = bge_dma_block_alloc(sc, BGE_JMEM, 873 &sc->bge_cdata.bge_jumbo_tag, 874 &sc->bge_cdata.bge_jumbo_map, 875 (void **)&sc->bge_ldata.bge_jumbo_buf, 876 &paddr); 877 if (error) { 878 if_printf(ifp, "could not create jumbo buffer\n"); 879 return error; 880 } 881 882 SLIST_INIT(&sc->bge_jfree_listhead); 883 884 /* 885 * Now divide it up into 9K pieces and save the addresses 886 * in an array. Note that we play an evil trick here by using 887 * the first few bytes in the buffer to hold the the address 888 * of the softc structure for this interface. This is because 889 * bge_jfree() needs it, but it is called by the mbuf management 890 * code which will not pass it to us explicitly. 891 */ 892 for (i = 0, ptr = sc->bge_ldata.bge_jumbo_buf; i < BGE_JSLOTS; i++) { 893 entry = &sc->bge_cdata.bge_jslots[i]; 894 entry->bge_sc = sc; 895 entry->bge_buf = ptr; 896 entry->bge_paddr = paddr; 897 entry->bge_inuse = 0; 898 entry->bge_slot = i; 899 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jslot_link); 900 901 ptr += BGE_JLEN; 902 paddr += BGE_JLEN; 903 } 904 return 0; 905 } 906 907 static void 908 bge_free_jumbo_mem(struct bge_softc *sc) 909 { 910 /* Destroy jumbo RX ring. */ 911 bge_dma_block_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 912 sc->bge_cdata.bge_rx_jumbo_ring_map, 913 sc->bge_ldata.bge_rx_jumbo_ring); 914 915 /* Destroy jumbo buffer block. */ 916 bge_dma_block_free(sc->bge_cdata.bge_jumbo_tag, 917 sc->bge_cdata.bge_jumbo_map, 918 sc->bge_ldata.bge_jumbo_buf); 919 } 920 921 /* 922 * Allocate a jumbo buffer. 923 */ 924 static struct bge_jslot * 925 bge_jalloc(struct bge_softc *sc) 926 { 927 struct bge_jslot *entry; 928 929 lwkt_serialize_enter(&sc->bge_jslot_serializer); 930 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 931 if (entry) { 932 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jslot_link); 933 entry->bge_inuse = 1; 934 } else { 935 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n"); 936 } 937 lwkt_serialize_exit(&sc->bge_jslot_serializer); 938 return(entry); 939 } 940 941 /* 942 * Adjust usage count on a jumbo buffer. 943 */ 944 static void 945 bge_jref(void *arg) 946 { 947 struct bge_jslot *entry = (struct bge_jslot *)arg; 948 struct bge_softc *sc = entry->bge_sc; 949 950 if (sc == NULL) 951 panic("bge_jref: can't find softc pointer!"); 952 953 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { 954 panic("bge_jref: asked to reference buffer " 955 "that we don't manage!"); 956 } else if (entry->bge_inuse == 0) { 957 panic("bge_jref: buffer already free!"); 958 } else { 959 atomic_add_int(&entry->bge_inuse, 1); 960 } 961 } 962 963 /* 964 * Release a jumbo buffer. 965 */ 966 static void 967 bge_jfree(void *arg) 968 { 969 struct bge_jslot *entry = (struct bge_jslot *)arg; 970 struct bge_softc *sc = entry->bge_sc; 971 972 if (sc == NULL) 973 panic("bge_jfree: can't find softc pointer!"); 974 975 if (&sc->bge_cdata.bge_jslots[entry->bge_slot] != entry) { 976 panic("bge_jfree: asked to free buffer that we don't manage!"); 977 } else if (entry->bge_inuse == 0) { 978 panic("bge_jfree: buffer already free!"); 979 } else { 980 /* 981 * Possible MP race to 0, use the serializer. The atomic insn 982 * is still needed for races against bge_jref(). 983 */ 984 lwkt_serialize_enter(&sc->bge_jslot_serializer); 985 atomic_subtract_int(&entry->bge_inuse, 1); 986 if (entry->bge_inuse == 0) { 987 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 988 entry, jslot_link); 989 } 990 lwkt_serialize_exit(&sc->bge_jslot_serializer); 991 } 992 } 993 994 995 /* 996 * Intialize a standard receive ring descriptor. 997 */ 998 static int 999 bge_newbuf_std(struct bge_softc *sc, int i, int init) 1000 { 1001 struct mbuf *m_new = NULL; 1002 bus_dma_segment_t seg; 1003 bus_dmamap_t map; 1004 int error, nsegs; 1005 1006 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 1007 if (m_new == NULL) 1008 return ENOBUFS; 1009 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1010 1011 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 1012 m_adj(m_new, ETHER_ALIGN); 1013 1014 error = bus_dmamap_load_mbuf_segment(sc->bge_cdata.bge_rx_mtag, 1015 sc->bge_cdata.bge_rx_tmpmap, m_new, 1016 &seg, 1, &nsegs, BUS_DMA_NOWAIT); 1017 if (error) { 1018 m_freem(m_new); 1019 return error; 1020 } 1021 1022 if (!init) { 1023 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 1024 sc->bge_cdata.bge_rx_std_dmamap[i], 1025 BUS_DMASYNC_POSTREAD); 1026 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1027 sc->bge_cdata.bge_rx_std_dmamap[i]); 1028 } 1029 1030 map = sc->bge_cdata.bge_rx_tmpmap; 1031 sc->bge_cdata.bge_rx_tmpmap = sc->bge_cdata.bge_rx_std_dmamap[i]; 1032 sc->bge_cdata.bge_rx_std_dmamap[i] = map; 1033 1034 sc->bge_cdata.bge_rx_std_chain[i].bge_mbuf = m_new; 1035 sc->bge_cdata.bge_rx_std_chain[i].bge_paddr = seg.ds_addr; 1036 1037 bge_setup_rxdesc_std(sc, i); 1038 return 0; 1039 } 1040 1041 static void 1042 bge_setup_rxdesc_std(struct bge_softc *sc, int i) 1043 { 1044 struct bge_rxchain *rc; 1045 struct bge_rx_bd *r; 1046 1047 rc = &sc->bge_cdata.bge_rx_std_chain[i]; 1048 r = &sc->bge_ldata.bge_rx_std_ring[i]; 1049 1050 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr); 1051 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr); 1052 r->bge_len = rc->bge_mbuf->m_len; 1053 r->bge_idx = i; 1054 r->bge_flags = BGE_RXBDFLAG_END; 1055 } 1056 1057 /* 1058 * Initialize a jumbo receive ring descriptor. This allocates 1059 * a jumbo buffer from the pool managed internally by the driver. 1060 */ 1061 static int 1062 bge_newbuf_jumbo(struct bge_softc *sc, int i, int init) 1063 { 1064 struct mbuf *m_new = NULL; 1065 struct bge_jslot *buf; 1066 bus_addr_t paddr; 1067 1068 /* Allocate the mbuf. */ 1069 MGETHDR(m_new, init ? MB_WAIT : MB_DONTWAIT, MT_DATA); 1070 if (m_new == NULL) 1071 return ENOBUFS; 1072 1073 /* Allocate the jumbo buffer */ 1074 buf = bge_jalloc(sc); 1075 if (buf == NULL) { 1076 m_freem(m_new); 1077 return ENOBUFS; 1078 } 1079 1080 /* Attach the buffer to the mbuf. */ 1081 m_new->m_ext.ext_arg = buf; 1082 m_new->m_ext.ext_buf = buf->bge_buf; 1083 m_new->m_ext.ext_free = bge_jfree; 1084 m_new->m_ext.ext_ref = bge_jref; 1085 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 1086 1087 m_new->m_flags |= M_EXT; 1088 1089 m_new->m_data = m_new->m_ext.ext_buf; 1090 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 1091 1092 paddr = buf->bge_paddr; 1093 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) { 1094 m_adj(m_new, ETHER_ALIGN); 1095 paddr += ETHER_ALIGN; 1096 } 1097 1098 /* Save necessary information */ 1099 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_mbuf = m_new; 1100 sc->bge_cdata.bge_rx_jumbo_chain[i].bge_paddr = paddr; 1101 1102 /* Set up the descriptor. */ 1103 bge_setup_rxdesc_jumbo(sc, i); 1104 return 0; 1105 } 1106 1107 static void 1108 bge_setup_rxdesc_jumbo(struct bge_softc *sc, int i) 1109 { 1110 struct bge_rx_bd *r; 1111 struct bge_rxchain *rc; 1112 1113 r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; 1114 rc = &sc->bge_cdata.bge_rx_jumbo_chain[i]; 1115 1116 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(rc->bge_paddr); 1117 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(rc->bge_paddr); 1118 r->bge_len = rc->bge_mbuf->m_len; 1119 r->bge_idx = i; 1120 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 1121 } 1122 1123 static int 1124 bge_init_rx_ring_std(struct bge_softc *sc) 1125 { 1126 int i, error; 1127 1128 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1129 error = bge_newbuf_std(sc, i, 1); 1130 if (error) 1131 return error; 1132 } 1133 1134 sc->bge_std = BGE_STD_RX_RING_CNT - 1; 1135 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1136 1137 return(0); 1138 } 1139 1140 static void 1141 bge_free_rx_ring_std(struct bge_softc *sc) 1142 { 1143 int i; 1144 1145 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1146 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_std_chain[i]; 1147 1148 if (rc->bge_mbuf != NULL) { 1149 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1150 sc->bge_cdata.bge_rx_std_dmamap[i]); 1151 m_freem(rc->bge_mbuf); 1152 rc->bge_mbuf = NULL; 1153 } 1154 bzero(&sc->bge_ldata.bge_rx_std_ring[i], 1155 sizeof(struct bge_rx_bd)); 1156 } 1157 } 1158 1159 static int 1160 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1161 { 1162 struct bge_rcb *rcb; 1163 int i, error; 1164 1165 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1166 error = bge_newbuf_jumbo(sc, i, 1); 1167 if (error) 1168 return error; 1169 } 1170 1171 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 1172 1173 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1174 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); 1175 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1176 1177 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1178 1179 return(0); 1180 } 1181 1182 static void 1183 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1184 { 1185 int i; 1186 1187 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1188 struct bge_rxchain *rc = &sc->bge_cdata.bge_rx_jumbo_chain[i]; 1189 1190 if (rc->bge_mbuf != NULL) { 1191 m_freem(rc->bge_mbuf); 1192 rc->bge_mbuf = NULL; 1193 } 1194 bzero(&sc->bge_ldata.bge_rx_jumbo_ring[i], 1195 sizeof(struct bge_rx_bd)); 1196 } 1197 } 1198 1199 static void 1200 bge_free_tx_ring(struct bge_softc *sc) 1201 { 1202 int i; 1203 1204 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1205 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1206 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 1207 sc->bge_cdata.bge_tx_dmamap[i]); 1208 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1209 sc->bge_cdata.bge_tx_chain[i] = NULL; 1210 } 1211 bzero(&sc->bge_ldata.bge_tx_ring[i], 1212 sizeof(struct bge_tx_bd)); 1213 } 1214 } 1215 1216 static int 1217 bge_init_tx_ring(struct bge_softc *sc) 1218 { 1219 sc->bge_txcnt = 0; 1220 sc->bge_tx_saved_considx = 0; 1221 sc->bge_tx_prodidx = 0; 1222 1223 /* Initialize transmit producer index for host-memory send ring. */ 1224 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1225 1226 /* 5700 b2 errata */ 1227 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1228 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1229 1230 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1231 /* 5700 b2 errata */ 1232 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 1233 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1234 1235 return(0); 1236 } 1237 1238 static void 1239 bge_setmulti(struct bge_softc *sc) 1240 { 1241 struct ifnet *ifp; 1242 struct ifmultiaddr *ifma; 1243 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1244 int h, i; 1245 1246 ifp = &sc->arpcom.ac_if; 1247 1248 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 1249 for (i = 0; i < 4; i++) 1250 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 1251 return; 1252 } 1253 1254 /* First, zot all the existing filters. */ 1255 for (i = 0; i < 4; i++) 1256 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 1257 1258 /* Now program new ones. */ 1259 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1260 if (ifma->ifma_addr->sa_family != AF_LINK) 1261 continue; 1262 h = ether_crc32_le( 1263 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1264 ETHER_ADDR_LEN) & 0x7f; 1265 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1266 } 1267 1268 for (i = 0; i < 4; i++) 1269 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1270 } 1271 1272 /* 1273 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1274 * self-test results. 1275 */ 1276 static int 1277 bge_chipinit(struct bge_softc *sc) 1278 { 1279 int i; 1280 uint32_t dma_rw_ctl, mode_ctl; 1281 uint16_t val; 1282 1283 /* Set endian type before we access any non-PCI registers. */ 1284 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, 1285 BGE_INIT | sc->bge_pci_miscctl, 4); 1286 1287 /* Clear the MAC control register */ 1288 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1289 DELAY(40); 1290 1291 /* 1292 * Clear the MAC statistics block in the NIC's 1293 * internal memory. 1294 */ 1295 for (i = BGE_STATS_BLOCK; 1296 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1297 BGE_MEMWIN_WRITE(sc, i, 0); 1298 1299 for (i = BGE_STATUS_BLOCK; 1300 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1301 BGE_MEMWIN_WRITE(sc, i, 0); 1302 1303 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { 1304 /* 1305 * Fix data corruption caused by non-qword write with WB. 1306 * Fix master abort in PCI mode. 1307 * Fix PCI latency timer. 1308 */ 1309 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); 1310 val |= (1 << 10) | (1 << 12) | (1 << 13); 1311 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); 1312 } 1313 1314 /* Set up the PCI DMA control register. */ 1315 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD; 1316 if (sc->bge_flags & BGE_FLAG_PCIE) { 1317 /* PCI-E bus */ 1318 /* DMA read watermark not used on PCI-E */ 1319 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1320 } else if (sc->bge_flags & BGE_FLAG_PCIX) { 1321 /* PCI-X bus */ 1322 if (sc->bge_asicrev == BGE_ASICREV_BCM5780) { 1323 dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1324 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1325 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 1326 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5714) { 1327 dma_rw_ctl |= (0x4 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1328 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1329 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1330 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1331 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1332 uint32_t rd_wat = 0x7; 1333 uint32_t clkctl; 1334 1335 clkctl = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1336 if ((sc->bge_flags & BGE_FLAG_MAXADDR_40BIT) && 1337 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1338 dma_rw_ctl |= 1339 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1340 } else if (clkctl == 0x6 || clkctl == 0x7) { 1341 dma_rw_ctl |= 1342 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 1343 } 1344 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) 1345 rd_wat = 0x4; 1346 1347 dma_rw_ctl |= (rd_wat << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1348 (3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1349 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 1350 } else { 1351 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1352 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1353 dma_rw_ctl |= 0xf; 1354 } 1355 } else { 1356 /* Conventional PCI bus */ 1357 dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1358 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1359 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1360 sc->bge_asicrev != BGE_ASICREV_BCM5750) 1361 dma_rw_ctl |= 0xf; 1362 } 1363 1364 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1365 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1366 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1367 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 1368 sc->bge_asicrev == BGE_ASICREV_BCM5701) { 1369 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 1370 BGE_PCIDMARWCTL_ASRT_ALL_BE; 1371 } 1372 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 1373 1374 /* 1375 * Set up general mode register. 1376 */ 1377 mode_ctl = BGE_DMA_SWAP_OPTIONS| 1378 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| 1379 BGE_MODECTL_TX_NO_PHDR_CSUM; 1380 1381 /* 1382 * BCM5701 B5 have a bug causing data corruption when using 1383 * 64-bit DMA reads, which can be terminated early and then 1384 * completed later as 32-bit accesses, in combination with 1385 * certain bridges. 1386 */ 1387 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 1388 sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 1389 mode_ctl |= BGE_MODECTL_FORCE_PCI32; 1390 1391 /* 1392 * Tell the firmware the driver is running 1393 */ 1394 if (sc->bge_asf_mode & ASF_STACKUP) 1395 mode_ctl |= BGE_MODECTL_STACKUP; 1396 1397 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 1398 1399 /* 1400 * Disable memory write invalidate. Apparently it is not supported 1401 * properly by these devices. Also ensure that INTx isn't disabled, 1402 * as these chips need it even when using MSI. 1403 */ 1404 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1405 (PCIM_CMD_MWRICEN | PCIM_CMD_INTxDIS), 4); 1406 1407 /* Set the timer prescaler (always 66Mhz) */ 1408 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1409 1410 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 1411 DELAY(40); /* XXX */ 1412 1413 /* Put PHY into ready state */ 1414 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 1415 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 1416 DELAY(40); 1417 } 1418 1419 return(0); 1420 } 1421 1422 static int 1423 bge_blockinit(struct bge_softc *sc) 1424 { 1425 struct bge_rcb *rcb; 1426 bus_size_t vrcb; 1427 bge_hostaddr taddr; 1428 uint32_t val; 1429 int i, limit; 1430 1431 /* 1432 * Initialize the memory window pointer register so that 1433 * we can access the first 32K of internal NIC RAM. This will 1434 * allow us to set up the TX send ring RCBs and the RX return 1435 * ring RCBs, plus other things which live in NIC memory. 1436 */ 1437 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1438 1439 /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1440 1441 if (!BGE_IS_5705_PLUS(sc)) { 1442 /* Configure mbuf memory pool */ 1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1444 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1445 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1446 else 1447 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1448 1449 /* Configure DMA resource pool */ 1450 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1451 BGE_DMA_DESCRIPTORS); 1452 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1453 } 1454 1455 /* Configure mbuf pool watermarks */ 1456 if (!BGE_IS_5705_PLUS(sc)) { 1457 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1458 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1459 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1460 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 1461 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1462 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1464 } else { 1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1466 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1467 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1468 } 1469 1470 /* Configure DMA resource watermarks */ 1471 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1472 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1473 1474 /* Enable buffer manager */ 1475 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1476 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); 1477 1478 /* Poll for buffer manager start indication */ 1479 for (i = 0; i < BGE_TIMEOUT; i++) { 1480 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1481 break; 1482 DELAY(10); 1483 } 1484 1485 if (i == BGE_TIMEOUT) { 1486 if_printf(&sc->arpcom.ac_if, 1487 "buffer manager failed to start\n"); 1488 return(ENXIO); 1489 } 1490 1491 /* Enable flow-through queues */ 1492 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1493 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1494 1495 /* Wait until queue initialization is complete */ 1496 for (i = 0; i < BGE_TIMEOUT; i++) { 1497 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1498 break; 1499 DELAY(10); 1500 } 1501 1502 if (i == BGE_TIMEOUT) { 1503 if_printf(&sc->arpcom.ac_if, 1504 "flow-through queue init failed\n"); 1505 return(ENXIO); 1506 } 1507 1508 /* 1509 * Summary of rings supported by the controller: 1510 * 1511 * Standard Receive Producer Ring 1512 * - This ring is used to feed receive buffers for "standard" 1513 * sized frames (typically 1536 bytes) to the controller. 1514 * 1515 * Jumbo Receive Producer Ring 1516 * - This ring is used to feed receive buffers for jumbo sized 1517 * frames (i.e. anything bigger than the "standard" frames) 1518 * to the controller. 1519 * 1520 * Mini Receive Producer Ring 1521 * - This ring is used to feed receive buffers for "mini" 1522 * sized frames to the controller. 1523 * - This feature required external memory for the controller 1524 * but was never used in a production system. Should always 1525 * be disabled. 1526 * 1527 * Receive Return Ring 1528 * - After the controller has placed an incoming frame into a 1529 * receive buffer that buffer is moved into a receive return 1530 * ring. The driver is then responsible to passing the 1531 * buffer up to the stack. Many versions of the controller 1532 * support multiple RR rings. 1533 * 1534 * Send Ring 1535 * - This ring is used for outgoing frames. Many versions of 1536 * the controller support multiple send rings. 1537 */ 1538 1539 /* Initialize the standard receive producer ring control block. */ 1540 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1541 rcb->bge_hostaddr.bge_addr_lo = 1542 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1543 rcb->bge_hostaddr.bge_addr_hi = 1544 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1545 if (BGE_IS_5705_PLUS(sc)) { 1546 /* 1547 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 1548 * Bits 15-2 : Reserved (should be 0) 1549 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1550 * Bit 0 : Reserved 1551 */ 1552 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 1553 } else { 1554 /* 1555 * Ring size is always XXX entries 1556 * Bits 31-16: Maximum RX frame size 1557 * Bits 15-2 : Reserved (should be 0) 1558 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 1559 * Bit 0 : Reserved 1560 */ 1561 rcb->bge_maxlen_flags = 1562 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 1563 } 1564 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 1565 /* Write the standard receive producer ring control block. */ 1566 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 1567 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1568 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1569 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 1570 /* Reset the standard receive producer ring producer index. */ 1571 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1572 1573 /* 1574 * Initialize the jumbo RX producer ring control 1575 * block. We set the 'ring disabled' bit in the 1576 * flags field until we're actually ready to start 1577 * using this ring (i.e. once we set the MTU 1578 * high enough to require it). 1579 */ 1580 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1581 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 1582 /* Get the jumbo receive producer ring RCB parameters. */ 1583 rcb->bge_hostaddr.bge_addr_lo = 1584 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1585 rcb->bge_hostaddr.bge_addr_hi = 1586 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); 1587 rcb->bge_maxlen_flags = 1588 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 1589 BGE_RCB_FLAG_RING_DISABLED); 1590 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 1591 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 1592 rcb->bge_hostaddr.bge_addr_hi); 1593 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 1594 rcb->bge_hostaddr.bge_addr_lo); 1595 /* Program the jumbo receive producer ring RCB parameters. */ 1596 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 1597 rcb->bge_maxlen_flags); 1598 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 1599 /* Reset the jumbo receive producer ring producer index. */ 1600 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1601 } 1602 1603 /* Disable the mini receive producer ring RCB. */ 1604 if (BGE_IS_5700_FAMILY(sc)) { 1605 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; 1606 rcb->bge_maxlen_flags = 1607 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 1608 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 1609 rcb->bge_maxlen_flags); 1610 /* Reset the mini receive producer ring producer index. */ 1611 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1612 } 1613 1614 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 1615 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 1616 (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 1617 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 1618 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)) { 1619 CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 1620 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 1621 } 1622 1623 /* 1624 * The BD ring replenish thresholds control how often the 1625 * hardware fetches new BD's from the producer rings in host 1626 * memory. Setting the value too low on a busy system can 1627 * starve the hardware and recue the throughpout. 1628 * 1629 * Set the BD ring replentish thresholds. The recommended 1630 * values are 1/8th the number of descriptors allocated to 1631 * each ring. 1632 */ 1633 if (BGE_IS_5705_PLUS(sc)) 1634 val = 8; 1635 else 1636 val = BGE_STD_RX_RING_CNT / 8; 1637 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 1638 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1639 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 1640 BGE_JUMBO_RX_RING_CNT/8); 1641 } 1642 1643 /* 1644 * Disable all send rings by setting the 'ring disabled' bit 1645 * in the flags field of all the TX send ring control blocks, 1646 * located in NIC memory. 1647 */ 1648 if (!BGE_IS_5705_PLUS(sc)) { 1649 /* 5700 to 5704 had 16 send rings. */ 1650 limit = BGE_TX_RINGS_EXTSSRAM_MAX; 1651 } else { 1652 limit = 1; 1653 } 1654 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1655 for (i = 0; i < limit; i++) { 1656 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1657 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1658 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1659 vrcb += sizeof(struct bge_rcb); 1660 } 1661 1662 /* Configure send ring RCB 0 (we use only the first ring) */ 1663 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1664 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1665 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1666 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1667 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1668 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1669 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1670 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1671 1672 /* 1673 * Disable all receive return rings by setting the 1674 * 'ring diabled' bit in the flags field of all the receive 1675 * return ring control blocks, located in NIC memory. 1676 */ 1677 if (!BGE_IS_5705_PLUS(sc)) 1678 limit = BGE_RX_RINGS_MAX; 1679 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755) 1680 limit = 4; 1681 else 1682 limit = 1; 1683 /* Disable all receive return rings. */ 1684 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1685 for (i = 0; i < limit; i++) { 1686 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1687 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1688 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1689 BGE_RCB_FLAG_RING_DISABLED); 1690 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1691 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 1692 (i * (sizeof(uint64_t))), 0); 1693 vrcb += sizeof(struct bge_rcb); 1694 } 1695 1696 /* 1697 * Set up receive return ring 0. Note that the NIC address 1698 * for RX return rings is 0x0. The return rings live entirely 1699 * within the host, so the nicaddr field in the RCB isn't used. 1700 */ 1701 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1702 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1703 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1704 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1705 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1706 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1707 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1708 1709 /* Set random backoff seed for TX */ 1710 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1711 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 1712 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 1713 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] + 1714 BGE_TX_BACKOFF_SEED_MASK); 1715 1716 /* Set inter-packet gap */ 1717 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1718 1719 /* 1720 * Specify which ring to use for packets that don't match 1721 * any RX rules. 1722 */ 1723 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1724 1725 /* 1726 * Configure number of RX lists. One interrupt distribution 1727 * list, sixteen active lists, one bad frames class. 1728 */ 1729 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1730 1731 /* Inialize RX list placement stats mask. */ 1732 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1733 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1734 1735 /* Disable host coalescing until we get it set up */ 1736 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1737 1738 /* Poll to make sure it's shut down. */ 1739 for (i = 0; i < BGE_TIMEOUT; i++) { 1740 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 1741 break; 1742 DELAY(10); 1743 } 1744 1745 if (i == BGE_TIMEOUT) { 1746 if_printf(&sc->arpcom.ac_if, 1747 "host coalescing engine failed to idle\n"); 1748 return(ENXIO); 1749 } 1750 1751 /* Set up host coalescing defaults */ 1752 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 1753 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 1754 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_coal_bds); 1755 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_coal_bds); 1756 if (!BGE_IS_5705_PLUS(sc)) { 1757 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 1758 sc->bge_rx_coal_ticks_int); 1759 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 1760 sc->bge_tx_coal_ticks_int); 1761 } 1762 /* 1763 * NOTE: 1764 * The datasheet (57XX-PG105-R) says BCM5705+ do not 1765 * have following two registers; obviously it is wrong. 1766 */ 1767 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, sc->bge_rx_coal_bds_int); 1768 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, sc->bge_tx_coal_bds_int); 1769 1770 /* Set up address of statistics block */ 1771 if (!BGE_IS_5705_PLUS(sc)) { 1772 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1773 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 1774 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1775 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 1776 1777 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 1778 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 1779 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 1780 } 1781 1782 /* Set up address of status block */ 1783 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 1784 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1785 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 1786 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1787 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1788 1789 /* 1790 * Set up status block partail update size. 1791 * 1792 * Because only single TX ring, RX produce ring and Rx return ring 1793 * are used, ask device to update only minimum part of status block 1794 * except for BCM5700 AX/BX, whose status block partial update size 1795 * can't be configured. 1796 */ 1797 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1798 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 1799 /* XXX Actually reserved on BCM5700 AX/BX */ 1800 val = BGE_STATBLKSZ_FULL; 1801 } else { 1802 val = BGE_STATBLKSZ_32BYTE; 1803 } 1804 #if 0 1805 /* 1806 * Does not seem to have visible effect in both 1807 * bulk data (1472B UDP datagram) and tiny data 1808 * (18B UDP datagram) TX tests. 1809 */ 1810 if (!BGE_IS_CRIPPLED(sc)) 1811 val |= BGE_HCCMODE_CLRTICK_TX; 1812 #endif 1813 1814 /* Turn on host coalescing state machine */ 1815 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 1816 1817 /* Turn on RX BD completion state machine and enable attentions */ 1818 CSR_WRITE_4(sc, BGE_RBDC_MODE, 1819 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 1820 1821 /* Turn on RX list placement state machine */ 1822 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 1823 1824 /* Turn on RX list selector state machine. */ 1825 if (!BGE_IS_5705_PLUS(sc)) 1826 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 1827 1828 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 1829 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 1830 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 1831 BGE_MACMODE_FRMHDR_DMA_ENB; 1832 1833 if (sc->bge_flags & BGE_FLAG_TBI) 1834 val |= BGE_PORTMODE_TBI; 1835 else if (sc->bge_flags & BGE_FLAG_MII_SERDES) 1836 val |= BGE_PORTMODE_GMII; 1837 else 1838 val |= BGE_PORTMODE_MII; 1839 1840 /* Turn on DMA, clear stats */ 1841 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 1842 DELAY(40); 1843 1844 /* Set misc. local control, enable interrupts on attentions */ 1845 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 1846 1847 #ifdef notdef 1848 /* Assert GPIO pins for PHY reset */ 1849 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 1850 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 1851 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 1852 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 1853 #endif 1854 1855 /* Turn on DMA completion state machine */ 1856 if (!BGE_IS_5705_PLUS(sc)) 1857 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 1858 1859 /* Turn on write DMA state machine */ 1860 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 1861 if (BGE_IS_5755_PLUS(sc)) { 1862 /* Enable host coalescing bug fix. */ 1863 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 1864 } 1865 if (sc->bge_asicrev == BGE_ASICREV_BCM5785) { 1866 /* Request larger DMA burst size to get better performance. */ 1867 val |= BGE_WDMAMODE_BURST_ALL_DATA; 1868 } 1869 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 1870 DELAY(40); 1871 1872 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || 1873 sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1874 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1875 sc->bge_asicrev == BGE_ASICREV_BCM57780) { 1876 /* 1877 * Enable fix for read DMA FIFO overruns. 1878 * The fix is to limit the number of RX BDs 1879 * the hardware would fetch at a fime. 1880 */ 1881 val = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL); 1882 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, 1883 val| BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 1884 } 1885 1886 /* Turn on read DMA state machine */ 1887 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1888 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1889 sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1890 sc->bge_asicrev == BGE_ASICREV_BCM57780) 1891 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1892 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1893 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 1894 if (sc->bge_flags & BGE_FLAG_PCIE) 1895 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1896 if (sc->bge_flags & BGE_FLAG_TSO) 1897 val |= BGE_RDMAMODE_TSO4_ENABLE; 1898 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 1899 DELAY(40); 1900 1901 /* Turn on RX data completion state machine */ 1902 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 1903 1904 /* Turn on RX BD initiator state machine */ 1905 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 1906 1907 /* Turn on RX data and RX BD initiator state machine */ 1908 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 1909 1910 /* Turn on Mbuf cluster free state machine */ 1911 if (!BGE_IS_5705_PLUS(sc)) 1912 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 1913 1914 /* Turn on send BD completion state machine */ 1915 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 1916 1917 /* Turn on send data completion state machine */ 1918 val = BGE_SDCMODE_ENABLE; 1919 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 1920 val |= BGE_SDCMODE_CDELAY; 1921 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 1922 1923 /* Turn on send data initiator state machine */ 1924 if (sc->bge_flags & BGE_FLAG_TSO) 1925 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 1926 BGE_SDIMODE_HW_LSO_PRE_DMA); 1927 else 1928 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 1929 1930 /* Turn on send BD initiator state machine */ 1931 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 1932 1933 /* Turn on send BD selector state machine */ 1934 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 1935 1936 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 1937 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 1938 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 1939 1940 /* ack/clear link change events */ 1941 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1942 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1943 BGE_MACSTAT_LINK_CHANGED); 1944 CSR_WRITE_4(sc, BGE_MI_STS, 0); 1945 1946 /* 1947 * Enable attention when the link has changed state for 1948 * devices that use auto polling. 1949 */ 1950 if (sc->bge_flags & BGE_FLAG_TBI) { 1951 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1952 } else { 1953 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 1954 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); 1955 DELAY(80); 1956 } 1957 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1958 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 1959 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1960 BGE_EVTENB_MI_INTERRUPT); 1961 } 1962 } 1963 1964 /* 1965 * Clear any pending link state attention. 1966 * Otherwise some link state change events may be lost until attention 1967 * is cleared by bge_intr() -> bge_softc.bge_link_upd() sequence. 1968 * It's not necessary on newer BCM chips - perhaps enabling link 1969 * state change attentions implies clearing pending attention. 1970 */ 1971 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 1972 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 1973 BGE_MACSTAT_LINK_CHANGED); 1974 1975 /* Enable link state change attentions. */ 1976 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 1977 1978 return(0); 1979 } 1980 1981 /* 1982 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 1983 * against our list and return its name if we find a match. Note 1984 * that since the Broadcom controller contains VPD support, we 1985 * can get the device name string from the controller itself instead 1986 * of the compiled-in string. This is a little slow, but it guarantees 1987 * we'll always announce the right product name. 1988 */ 1989 static int 1990 bge_probe(device_t dev) 1991 { 1992 const struct bge_type *t; 1993 uint16_t product, vendor; 1994 1995 product = pci_get_device(dev); 1996 vendor = pci_get_vendor(dev); 1997 1998 for (t = bge_devs; t->bge_name != NULL; t++) { 1999 if (vendor == t->bge_vid && product == t->bge_did) 2000 break; 2001 } 2002 if (t->bge_name == NULL) 2003 return(ENXIO); 2004 2005 device_set_desc(dev, t->bge_name); 2006 return(0); 2007 } 2008 2009 static int 2010 bge_attach(device_t dev) 2011 { 2012 struct ifnet *ifp; 2013 struct bge_softc *sc; 2014 uint32_t hwcfg = 0, misccfg; 2015 int error = 0, rid, capmask; 2016 uint8_t ether_addr[ETHER_ADDR_LEN]; 2017 uint16_t product, vendor; 2018 driver_intr_t *intr_func; 2019 uintptr_t mii_priv = 0; 2020 u_int intr_flags; 2021 int msi_enable; 2022 2023 sc = device_get_softc(dev); 2024 sc->bge_dev = dev; 2025 callout_init_mp(&sc->bge_stat_timer); 2026 lwkt_serialize_init(&sc->bge_jslot_serializer); 2027 2028 product = pci_get_device(dev); 2029 vendor = pci_get_vendor(dev); 2030 2031 #ifndef BURN_BRIDGES 2032 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 2033 uint32_t irq, mem; 2034 2035 irq = pci_read_config(dev, PCIR_INTLINE, 4); 2036 mem = pci_read_config(dev, BGE_PCI_BAR0, 4); 2037 2038 device_printf(dev, "chip is in D%d power mode " 2039 "-- setting to D0\n", pci_get_powerstate(dev)); 2040 2041 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 2042 2043 pci_write_config(dev, PCIR_INTLINE, irq, 4); 2044 pci_write_config(dev, BGE_PCI_BAR0, mem, 4); 2045 } 2046 #endif /* !BURN_BRIDGE */ 2047 2048 /* 2049 * Map control/status registers. 2050 */ 2051 pci_enable_busmaster(dev); 2052 2053 rid = BGE_PCI_BAR0; 2054 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2055 RF_ACTIVE); 2056 2057 if (sc->bge_res == NULL) { 2058 device_printf(dev, "couldn't map memory\n"); 2059 return ENXIO; 2060 } 2061 2062 sc->bge_btag = rman_get_bustag(sc->bge_res); 2063 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2064 2065 /* Save various chip information */ 2066 sc->bge_chipid = 2067 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2068 BGE_PCIMISCCTL_ASICREV_SHIFT; 2069 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) { 2070 /* All chips, which use BGE_PCI_PRODID_ASICREV, have CPMU */ 2071 sc->bge_flags |= BGE_FLAG_CPMU; 2072 sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); 2073 } 2074 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2075 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2076 2077 /* Save chipset family. */ 2078 switch (sc->bge_asicrev) { 2079 case BGE_ASICREV_BCM5755: 2080 case BGE_ASICREV_BCM5761: 2081 case BGE_ASICREV_BCM5784: 2082 case BGE_ASICREV_BCM5785: 2083 case BGE_ASICREV_BCM5787: 2084 case BGE_ASICREV_BCM57780: 2085 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | 2086 BGE_FLAG_5705_PLUS; 2087 break; 2088 2089 case BGE_ASICREV_BCM5700: 2090 case BGE_ASICREV_BCM5701: 2091 case BGE_ASICREV_BCM5703: 2092 case BGE_ASICREV_BCM5704: 2093 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; 2094 break; 2095 2096 case BGE_ASICREV_BCM5714_A0: 2097 case BGE_ASICREV_BCM5780: 2098 case BGE_ASICREV_BCM5714: 2099 sc->bge_flags |= BGE_FLAG_5714_FAMILY; 2100 /* Fall through */ 2101 2102 case BGE_ASICREV_BCM5750: 2103 case BGE_ASICREV_BCM5752: 2104 case BGE_ASICREV_BCM5906: 2105 sc->bge_flags |= BGE_FLAG_575X_PLUS; 2106 /* Fall through */ 2107 2108 case BGE_ASICREV_BCM5705: 2109 sc->bge_flags |= BGE_FLAG_5705_PLUS; 2110 break; 2111 } 2112 2113 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 2114 sc->bge_flags |= BGE_FLAG_NO_EEPROM; 2115 2116 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 2117 sc->bge_flags |= BGE_FLAG_APE; 2118 2119 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; 2120 if (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 2121 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 2122 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 2123 sc->bge_flags |= BGE_FLAG_5788; 2124 2125 /* BCM5755 or higher and BCM5906 have short DMA bug. */ 2126 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) 2127 sc->bge_flags |= BGE_FLAG_SHORTDMA; 2128 2129 /* 2130 * Increase STD RX ring prod index by at most 8 for BCM5750, 2131 * BCM5752 and BCM5755 to workaround hardware errata. 2132 */ 2133 if (sc->bge_asicrev == BGE_ASICREV_BCM5750 || 2134 sc->bge_asicrev == BGE_ASICREV_BCM5752 || 2135 sc->bge_asicrev == BGE_ASICREV_BCM5755) 2136 sc->bge_rx_wreg = 8; 2137 2138 /* 2139 * Check if this is a PCI-X or PCI Express device. 2140 */ 2141 if (BGE_IS_5705_PLUS(sc)) { 2142 if (pci_is_pcie(dev)) { 2143 sc->bge_flags |= BGE_FLAG_PCIE; 2144 sc->bge_pciecap = pci_get_pciecap_ptr(sc->bge_dev); 2145 pcie_set_max_readrq(dev, PCIEM_DEVCTL_MAX_READRQ_4096); 2146 } 2147 } else { 2148 /* 2149 * Check if the device is in PCI-X Mode. 2150 * (This bit is not valid on PCI Express controllers.) 2151 */ 2152 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 2153 BGE_PCISTATE_PCI_BUSMODE) == 0) { 2154 sc->bge_flags |= BGE_FLAG_PCIX; 2155 sc->bge_pcixcap = pci_get_pcixcap_ptr(sc->bge_dev); 2156 sc->bge_mbox_reorder = device_getenv_int(sc->bge_dev, 2157 "mbox_reorder", 0); 2158 } 2159 } 2160 device_printf(dev, "CHIP ID 0x%08x; " 2161 "ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n", 2162 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev, 2163 (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" 2164 : ((sc->bge_flags & BGE_FLAG_PCIE) ? 2165 "PCI-E" : "PCI")); 2166 2167 /* 2168 * The 40bit DMA bug applies to the 5714/5715 controllers and is 2169 * not actually a MAC controller bug but an issue with the embedded 2170 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 2171 */ 2172 if ((sc->bge_flags & BGE_FLAG_PCIX) && 2173 (BGE_IS_5714_FAMILY(sc) || device_getenv_int(dev, "dma40b", 0))) 2174 sc->bge_flags |= BGE_FLAG_MAXADDR_40BIT; 2175 2176 /* 2177 * When using the BCM5701 in PCI-X mode, data corruption has 2178 * been observed in the first few bytes of some received packets. 2179 * Aligning the packet buffer in memory eliminates the corruption. 2180 * Unfortunately, this misaligns the packet payloads. On platforms 2181 * which do not support unaligned accesses, we will realign the 2182 * payloads by copying the received packets. 2183 */ 2184 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2185 (sc->bge_flags & BGE_FLAG_PCIX)) 2186 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2187 2188 if (!BGE_IS_CRIPPLED(sc)) { 2189 if (device_getenv_int(dev, "status_tag", 1)) { 2190 sc->bge_flags |= BGE_FLAG_STATUS_TAG; 2191 sc->bge_pci_miscctl = BGE_PCIMISCCTL_TAGGED_STATUS; 2192 if (bootverbose) 2193 device_printf(dev, "enable status tag\n"); 2194 } 2195 } 2196 2197 if (BGE_IS_5755_PLUS(sc)) { 2198 /* 2199 * BCM5754 and BCM5787 shares the same ASIC id so 2200 * explicit device id check is required. 2201 * Due to unknown reason TSO does not work on BCM5755M. 2202 */ 2203 if (product != PCI_PRODUCT_BROADCOM_BCM5754 && 2204 product != PCI_PRODUCT_BROADCOM_BCM5754M && 2205 product != PCI_PRODUCT_BROADCOM_BCM5755M) 2206 sc->bge_flags |= BGE_FLAG_TSO; 2207 } 2208 2209 /* 2210 * Set various PHY quirk flags. 2211 */ 2212 2213 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 || 2214 sc->bge_asicrev == BGE_ASICREV_BCM5701) && 2215 pci_get_subvendor(dev) == PCI_VENDOR_DELL) 2216 mii_priv |= BRGPHY_FLAG_NO_3LED; 2217 2218 capmask = MII_CAPMASK_DEFAULT; 2219 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 && 2220 (misccfg == 0x4000 || misccfg == 0x8000)) || 2221 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 2222 vendor == PCI_VENDOR_BROADCOM && 2223 (product == PCI_PRODUCT_BROADCOM_BCM5901 || 2224 product == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2225 product == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2226 (vendor == PCI_VENDOR_BROADCOM && 2227 (product == PCI_PRODUCT_BROADCOM_BCM5751F || 2228 product == PCI_PRODUCT_BROADCOM_BCM5753F || 2229 product == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2230 product == PCI_PRODUCT_BROADCOM_BCM57790 || 2231 sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2232 /* 10/100 only */ 2233 capmask &= ~BMSR_EXTSTAT; 2234 } 2235 2236 mii_priv |= BRGPHY_FLAG_WIRESPEED; 2237 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 2238 (sc->bge_asicrev == BGE_ASICREV_BCM5705 && 2239 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2240 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2241 sc->bge_asicrev == BGE_ASICREV_BCM5906) 2242 mii_priv &= ~BRGPHY_FLAG_WIRESPEED; 2243 2244 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2245 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2246 mii_priv |= BRGPHY_FLAG_CRC_BUG; 2247 2248 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || 2249 sc->bge_chiprev == BGE_CHIPREV_5704_AX) 2250 mii_priv |= BRGPHY_FLAG_ADC_BUG; 2251 2252 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2253 mii_priv |= BRGPHY_FLAG_5704_A0; 2254 2255 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 2256 mii_priv |= BRGPHY_FLAG_5906; 2257 2258 if (BGE_IS_5705_PLUS(sc) && 2259 sc->bge_asicrev != BGE_ASICREV_BCM5906 && 2260 /* sc->bge_asicrev != BGE_ASICREV_BCM5717 && */ 2261 sc->bge_asicrev != BGE_ASICREV_BCM5785 && 2262 /* sc->bge_asicrev != BGE_ASICREV_BCM57765 && */ 2263 sc->bge_asicrev != BGE_ASICREV_BCM57780) { 2264 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2265 sc->bge_asicrev == BGE_ASICREV_BCM5761 || 2266 sc->bge_asicrev == BGE_ASICREV_BCM5784 || 2267 sc->bge_asicrev == BGE_ASICREV_BCM5787) { 2268 if (product != PCI_PRODUCT_BROADCOM_BCM5722 && 2269 product != PCI_PRODUCT_BROADCOM_BCM5756) 2270 mii_priv |= BRGPHY_FLAG_JITTER_BUG; 2271 if (product == PCI_PRODUCT_BROADCOM_BCM5755M) 2272 mii_priv |= BRGPHY_FLAG_ADJUST_TRIM; 2273 } else { 2274 mii_priv |= BRGPHY_FLAG_BER_BUG; 2275 } 2276 } 2277 2278 /* 2279 * Allocate interrupt 2280 */ 2281 msi_enable = bge_msi_enable; 2282 if ((sc->bge_flags & BGE_FLAG_STATUS_TAG) == 0) { 2283 /* If "tagged status" is disabled, don't enable MSI */ 2284 msi_enable = 0; 2285 } else if (msi_enable) { 2286 msi_enable = 0; /* Disable by default */ 2287 if (BGE_IS_575X_PLUS(sc)) { 2288 msi_enable = 1; 2289 /* XXX we filter all 5714 chips */ 2290 if (sc->bge_asicrev == BGE_ASICREV_BCM5714 || 2291 (sc->bge_asicrev == BGE_ASICREV_BCM5750 && 2292 (sc->bge_chiprev == BGE_CHIPREV_5750_AX || 2293 sc->bge_chiprev == BGE_CHIPREV_5750_BX))) 2294 msi_enable = 0; 2295 else if (BGE_IS_5755_PLUS(sc) || 2296 sc->bge_asicrev == BGE_ASICREV_BCM5906) 2297 sc->bge_flags |= BGE_FLAG_ONESHOT_MSI; 2298 } 2299 } 2300 if (msi_enable) { 2301 if (pci_find_extcap(dev, PCIY_MSI, &sc->bge_msicap)) { 2302 device_printf(dev, "no MSI capability\n"); 2303 msi_enable = 0; 2304 } 2305 } 2306 2307 sc->bge_irq_type = pci_alloc_1intr(dev, msi_enable, &sc->bge_irq_rid, 2308 &intr_flags); 2309 2310 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->bge_irq_rid, 2311 intr_flags); 2312 if (sc->bge_irq == NULL) { 2313 device_printf(dev, "couldn't map interrupt\n"); 2314 error = ENXIO; 2315 goto fail; 2316 } 2317 2318 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) 2319 bge_enable_msi(sc); 2320 else 2321 sc->bge_flags &= ~BGE_FLAG_ONESHOT_MSI; 2322 2323 /* Initialize if_name earlier, so if_printf could be used */ 2324 ifp = &sc->arpcom.ac_if; 2325 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2326 2327 sc->bge_asf_mode = 0; 2328 /* No ASF if APE present. */ 2329 if ((sc->bge_flags & BGE_FLAG_APE) == 0) { 2330 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 2331 BGE_SRAM_DATA_SIG_MAGIC)) { 2332 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & 2333 BGE_HWCFG_ASF) { 2334 sc->bge_asf_mode |= ASF_ENABLE; 2335 sc->bge_asf_mode |= ASF_STACKUP; 2336 if (BGE_IS_575X_PLUS(sc)) 2337 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2338 } 2339 } 2340 } 2341 2342 /* 2343 * Try to reset the chip. 2344 */ 2345 bge_stop_fw(sc); 2346 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 2347 bge_reset(sc); 2348 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 2349 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 2350 2351 if (bge_chipinit(sc)) { 2352 device_printf(dev, "chip initialization failed\n"); 2353 error = ENXIO; 2354 goto fail; 2355 } 2356 2357 /* 2358 * Get station address 2359 */ 2360 error = bge_get_eaddr(sc, ether_addr); 2361 if (error) { 2362 device_printf(dev, "failed to read station address\n"); 2363 goto fail; 2364 } 2365 2366 /* 5705/5750 limits RX return ring to 512 entries. */ 2367 if (BGE_IS_5705_PLUS(sc)) 2368 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2369 else 2370 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2371 2372 error = bge_dma_alloc(sc); 2373 if (error) 2374 goto fail; 2375 2376 /* Set default tuneable values. */ 2377 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2378 sc->bge_rx_coal_ticks = BGE_RX_COAL_TICKS_DEF; 2379 sc->bge_tx_coal_ticks = BGE_TX_COAL_TICKS_DEF; 2380 sc->bge_rx_coal_bds = BGE_RX_COAL_BDS_DEF; 2381 sc->bge_tx_coal_bds = BGE_TX_COAL_BDS_DEF; 2382 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 2383 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_DEF; 2384 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_DEF; 2385 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_DEF; 2386 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_DEF; 2387 } else { 2388 sc->bge_rx_coal_ticks_int = BGE_RX_COAL_TICKS_MIN; 2389 sc->bge_tx_coal_ticks_int = BGE_TX_COAL_TICKS_MIN; 2390 sc->bge_rx_coal_bds_int = BGE_RX_COAL_BDS_MIN; 2391 sc->bge_tx_coal_bds_int = BGE_TX_COAL_BDS_MIN; 2392 } 2393 sc->bge_tx_wreg = BGE_TX_WREG_NSEGS; 2394 2395 /* Set up TX spare and reserved descriptor count */ 2396 if (sc->bge_flags & BGE_FLAG_TSO) { 2397 sc->bge_txspare = BGE_NSEG_SPARE_TSO; 2398 sc->bge_txrsvd = BGE_NSEG_RSVD_TSO; 2399 } else { 2400 sc->bge_txspare = BGE_NSEG_SPARE; 2401 sc->bge_txrsvd = BGE_NSEG_RSVD; 2402 } 2403 2404 /* Set up ifnet structure */ 2405 ifp->if_softc = sc; 2406 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2407 ifp->if_ioctl = bge_ioctl; 2408 ifp->if_start = bge_start; 2409 #ifdef IFPOLL_ENABLE 2410 ifp->if_npoll = bge_npoll; 2411 #endif 2412 ifp->if_watchdog = bge_watchdog; 2413 ifp->if_init = bge_init; 2414 ifp->if_mtu = ETHERMTU; 2415 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2416 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 2417 ifq_set_ready(&ifp->if_snd); 2418 2419 /* 2420 * 5700 B0 chips do not support checksumming correctly due 2421 * to hardware bugs. 2422 */ 2423 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) { 2424 ifp->if_capabilities |= IFCAP_HWCSUM; 2425 ifp->if_hwassist |= BGE_CSUM_FEATURES; 2426 } 2427 if (sc->bge_flags & BGE_FLAG_TSO) { 2428 ifp->if_capabilities |= IFCAP_TSO; 2429 ifp->if_hwassist |= CSUM_TSO; 2430 } 2431 ifp->if_capenable = ifp->if_capabilities; 2432 2433 /* 2434 * Figure out what sort of media we have by checking the 2435 * hardware config word in the first 32k of NIC internal memory, 2436 * or fall back to examining the EEPROM if necessary. 2437 * Note: on some BCM5700 cards, this value appears to be unset. 2438 * If that's the case, we have to rely on identifying the NIC 2439 * by its PCI subsystem ID, as we do below for the SysKonnect 2440 * SK-9D41. 2441 */ 2442 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) { 2443 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); 2444 } else { 2445 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2446 sizeof(hwcfg))) { 2447 device_printf(dev, "failed to read EEPROM\n"); 2448 error = ENXIO; 2449 goto fail; 2450 } 2451 hwcfg = ntohl(hwcfg); 2452 } 2453 2454 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2455 if (pci_get_subvendor(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41 || 2456 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 2457 if (BGE_IS_5714_FAMILY(sc)) 2458 sc->bge_flags |= BGE_FLAG_MII_SERDES; 2459 else 2460 sc->bge_flags |= BGE_FLAG_TBI; 2461 } 2462 2463 /* Setup MI MODE */ 2464 if (sc->bge_flags & BGE_FLAG_CPMU) 2465 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST; 2466 else 2467 sc->bge_mi_mode = BGE_MIMODE_BASE; 2468 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) { 2469 /* Enable auto polling for BCM570[0-5]. */ 2470 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL; 2471 } 2472 2473 /* Setup link status update stuffs */ 2474 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2475 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 2476 sc->bge_link_upd = bge_bcm5700_link_upd; 2477 sc->bge_link_chg = BGE_MACSTAT_MI_INTERRUPT; 2478 } else if (sc->bge_flags & BGE_FLAG_TBI) { 2479 sc->bge_link_upd = bge_tbi_link_upd; 2480 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2481 } else if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { 2482 sc->bge_link_upd = bge_autopoll_link_upd; 2483 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2484 } else { 2485 sc->bge_link_upd = bge_copper_link_upd; 2486 sc->bge_link_chg = BGE_MACSTAT_LINK_CHANGED; 2487 } 2488 2489 /* 2490 * Broadcom's own driver always assumes the internal 2491 * PHY is at GMII address 1. On some chips, the PHY responds 2492 * to accesses at all addresses, which could cause us to 2493 * bogusly attach the PHY 32 times at probe type. Always 2494 * restricting the lookup to address 1 is simpler than 2495 * trying to figure out which chips revisions should be 2496 * special-cased. 2497 */ 2498 sc->bge_phyno = 1; 2499 2500 if (sc->bge_flags & BGE_FLAG_TBI) { 2501 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2502 bge_ifmedia_upd, bge_ifmedia_sts); 2503 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2504 ifmedia_add(&sc->bge_ifmedia, 2505 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2506 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2507 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2508 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2509 } else { 2510 struct mii_probe_args mii_args; 2511 int tries; 2512 2513 /* 2514 * Do transceiver setup and tell the firmware the 2515 * driver is down so we can try to get access the 2516 * probe if ASF is running. Retry a couple of times 2517 * if we get a conflict with the ASF firmware accessing 2518 * the PHY. 2519 */ 2520 tries = 0; 2521 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2522 again: 2523 bge_asf_driver_up(sc); 2524 2525 mii_probe_args_init(&mii_args, bge_ifmedia_upd, bge_ifmedia_sts); 2526 mii_args.mii_probemask = 1 << sc->bge_phyno; 2527 mii_args.mii_capmask = capmask; 2528 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY; 2529 mii_args.mii_priv = mii_priv; 2530 2531 error = mii_probe(dev, &sc->bge_miibus, &mii_args); 2532 if (error) { 2533 if (tries++ < 4) { 2534 device_printf(sc->bge_dev, "Probe MII again\n"); 2535 bge_miibus_writereg(sc->bge_dev, 2536 sc->bge_phyno, MII_BMCR, BMCR_RESET); 2537 goto again; 2538 } 2539 device_printf(dev, "MII without any PHY!\n"); 2540 goto fail; 2541 } 2542 2543 /* 2544 * Now tell the firmware we are going up after probing the PHY 2545 */ 2546 if (sc->bge_asf_mode & ASF_STACKUP) 2547 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2548 } 2549 2550 /* 2551 * Create sysctl nodes. 2552 */ 2553 sysctl_ctx_init(&sc->bge_sysctl_ctx); 2554 sc->bge_sysctl_tree = SYSCTL_ADD_NODE(&sc->bge_sysctl_ctx, 2555 SYSCTL_STATIC_CHILDREN(_hw), 2556 OID_AUTO, 2557 device_get_nameunit(dev), 2558 CTLFLAG_RD, 0, ""); 2559 if (sc->bge_sysctl_tree == NULL) { 2560 device_printf(dev, "can't add sysctl node\n"); 2561 error = ENXIO; 2562 goto fail; 2563 } 2564 2565 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2566 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2567 OID_AUTO, "rx_coal_ticks", 2568 CTLTYPE_INT | CTLFLAG_RW, 2569 sc, 0, bge_sysctl_rx_coal_ticks, "I", 2570 "Receive coalescing ticks (usec)."); 2571 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2572 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2573 OID_AUTO, "tx_coal_ticks", 2574 CTLTYPE_INT | CTLFLAG_RW, 2575 sc, 0, bge_sysctl_tx_coal_ticks, "I", 2576 "Transmit coalescing ticks (usec)."); 2577 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2578 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2579 OID_AUTO, "rx_coal_bds", 2580 CTLTYPE_INT | CTLFLAG_RW, 2581 sc, 0, bge_sysctl_rx_coal_bds, "I", 2582 "Receive max coalesced BD count."); 2583 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2584 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2585 OID_AUTO, "tx_coal_bds", 2586 CTLTYPE_INT | CTLFLAG_RW, 2587 sc, 0, bge_sysctl_tx_coal_bds, "I", 2588 "Transmit max coalesced BD count."); 2589 2590 SYSCTL_ADD_INT(&sc->bge_sysctl_ctx, 2591 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2592 OID_AUTO, "tx_wreg", CTLFLAG_RW, 2593 &sc->bge_tx_wreg, 0, 2594 "# of segments before writing to hardware register"); 2595 2596 if (sc->bge_flags & BGE_FLAG_PCIE) { 2597 /* 2598 * A common design characteristic for many Broadcom 2599 * client controllers is that they only support a 2600 * single outstanding DMA read operation on the PCIe 2601 * bus. This means that it will take twice as long to 2602 * fetch a TX frame that is split into header and 2603 * payload buffers as it does to fetch a single, 2604 * contiguous TX frame (2 reads vs. 1 read). For these 2605 * controllers, coalescing buffers to reduce the number 2606 * of memory reads is effective way to get maximum 2607 * performance(about 940Mbps). Without collapsing TX 2608 * buffers the maximum TCP bulk transfer performance 2609 * is about 850Mbps. However forcing coalescing mbufs 2610 * consumes a lot of CPU cycles, so leave it off by 2611 * default. 2612 */ 2613 SYSCTL_ADD_INT(&sc->bge_sysctl_ctx, 2614 SYSCTL_CHILDREN(sc->bge_sysctl_tree), 2615 OID_AUTO, "force_defrag", CTLFLAG_RW, 2616 &sc->bge_force_defrag, 0, 2617 "Force defragment on TX path"); 2618 } 2619 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 2620 if (!BGE_IS_5705_PLUS(sc)) { 2621 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2622 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2623 "rx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW, 2624 sc, 0, bge_sysctl_rx_coal_ticks_int, "I", 2625 "Receive coalescing ticks " 2626 "during interrupt (usec)."); 2627 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2628 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2629 "tx_coal_ticks_int", CTLTYPE_INT | CTLFLAG_RW, 2630 sc, 0, bge_sysctl_tx_coal_ticks_int, "I", 2631 "Transmit coalescing ticks " 2632 "during interrupt (usec)."); 2633 } 2634 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2635 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2636 "rx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2637 sc, 0, bge_sysctl_rx_coal_bds_int, "I", 2638 "Receive max coalesced BD count during interrupt."); 2639 SYSCTL_ADD_PROC(&sc->bge_sysctl_ctx, 2640 SYSCTL_CHILDREN(sc->bge_sysctl_tree), OID_AUTO, 2641 "tx_coal_bds_int", CTLTYPE_INT | CTLFLAG_RW, 2642 sc, 0, bge_sysctl_tx_coal_bds_int, "I", 2643 "Transmit max coalesced BD count during interrupt."); 2644 } 2645 2646 /* 2647 * Call MI attach routine. 2648 */ 2649 ether_ifattach(ifp, ether_addr, NULL); 2650 2651 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->bge_irq)); 2652 2653 #ifdef IFPOLL_ENABLE 2654 /* Polling setup */ 2655 ifpoll_compat_setup(&sc->bge_npoll, 2656 &sc->bge_sysctl_ctx, sc->bge_sysctl_tree, device_get_unit(dev), 2657 ifp->if_serializer); 2658 #endif 2659 2660 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) { 2661 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 2662 intr_func = bge_msi_oneshot; 2663 if (bootverbose) 2664 device_printf(dev, "oneshot MSI\n"); 2665 } else { 2666 intr_func = bge_msi; 2667 } 2668 } else if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 2669 intr_func = bge_intr_legacy; 2670 } else { 2671 intr_func = bge_intr_crippled; 2672 } 2673 error = bus_setup_intr(dev, sc->bge_irq, INTR_MPSAFE, intr_func, sc, 2674 &sc->bge_intrhand, ifp->if_serializer); 2675 if (error) { 2676 ether_ifdetach(ifp); 2677 device_printf(dev, "couldn't set up irq\n"); 2678 goto fail; 2679 } 2680 2681 return(0); 2682 fail: 2683 bge_detach(dev); 2684 return(error); 2685 } 2686 2687 static int 2688 bge_detach(device_t dev) 2689 { 2690 struct bge_softc *sc = device_get_softc(dev); 2691 2692 if (device_is_attached(dev)) { 2693 struct ifnet *ifp = &sc->arpcom.ac_if; 2694 2695 lwkt_serialize_enter(ifp->if_serializer); 2696 bge_stop(sc); 2697 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2698 lwkt_serialize_exit(ifp->if_serializer); 2699 2700 ether_ifdetach(ifp); 2701 } 2702 2703 if (sc->bge_flags & BGE_FLAG_TBI) 2704 ifmedia_removeall(&sc->bge_ifmedia); 2705 if (sc->bge_miibus) 2706 device_delete_child(dev, sc->bge_miibus); 2707 bus_generic_detach(dev); 2708 2709 if (sc->bge_irq != NULL) { 2710 bus_release_resource(dev, SYS_RES_IRQ, sc->bge_irq_rid, 2711 sc->bge_irq); 2712 } 2713 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) 2714 pci_release_msi(dev); 2715 2716 if (sc->bge_res != NULL) { 2717 bus_release_resource(dev, SYS_RES_MEMORY, 2718 BGE_PCI_BAR0, sc->bge_res); 2719 } 2720 2721 if (sc->bge_sysctl_tree != NULL) 2722 sysctl_ctx_free(&sc->bge_sysctl_ctx); 2723 2724 bge_dma_free(sc); 2725 2726 return 0; 2727 } 2728 2729 static void 2730 bge_reset(struct bge_softc *sc) 2731 { 2732 device_t dev; 2733 uint32_t cachesize, command, pcistate, reset; 2734 void (*write_op)(struct bge_softc *, uint32_t, uint32_t); 2735 int i, val = 0; 2736 2737 dev = sc->bge_dev; 2738 2739 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 2740 sc->bge_asicrev != BGE_ASICREV_BCM5906) { 2741 if (sc->bge_flags & BGE_FLAG_PCIE) 2742 write_op = bge_writemem_direct; 2743 else 2744 write_op = bge_writemem_ind; 2745 } else { 2746 write_op = bge_writereg_ind; 2747 } 2748 2749 /* Save some important PCI state. */ 2750 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2751 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2752 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2753 2754 pci_write_config(dev, BGE_PCI_MISC_CTL, 2755 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2756 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2757 sc->bge_pci_miscctl, 4); 2758 2759 /* Disable fastboot on controllers that support it. */ 2760 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 2761 BGE_IS_5755_PLUS(sc)) { 2762 if (bootverbose) 2763 if_printf(&sc->arpcom.ac_if, "Disabling fastboot\n"); 2764 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 2765 } 2766 2767 /* 2768 * Write the magic number to SRAM at offset 0xB50. 2769 * When firmware finishes its initialization it will 2770 * write ~BGE_SRAM_FW_MB_MAGIC to the same location. 2771 */ 2772 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 2773 2774 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2775 2776 /* XXX: Broadcom Linux driver. */ 2777 if (sc->bge_flags & BGE_FLAG_PCIE) { 2778 /* Force PCI-E 1.0a mode */ 2779 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 && 2780 CSR_READ_4(sc, BGE_PCIE_PHY_TSTCTL) == 2781 (BGE_PCIE_PHY_TSTCTL_PSCRAM | 2782 BGE_PCIE_PHY_TSTCTL_PCIE10)) { 2783 CSR_WRITE_4(sc, BGE_PCIE_PHY_TSTCTL, 2784 BGE_PCIE_PHY_TSTCTL_PSCRAM); 2785 } 2786 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2787 /* Prevent PCIE link training during global reset */ 2788 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2789 reset |= (1<<29); 2790 } 2791 } 2792 2793 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2794 uint32_t status, ctrl; 2795 2796 status = CSR_READ_4(sc, BGE_VCPU_STATUS); 2797 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 2798 status | BGE_VCPU_STATUS_DRV_RESET); 2799 ctrl = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 2800 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 2801 ctrl & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 2802 } 2803 2804 /* 2805 * Set GPHY Power Down Override to leave GPHY 2806 * powered up in D0 uninitialized. 2807 */ 2808 if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU) == 0) 2809 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 2810 2811 /* Issue global reset */ 2812 write_op(sc, BGE_MISC_CFG, reset); 2813 2814 DELAY(1000); 2815 2816 /* XXX: Broadcom Linux driver. */ 2817 if (sc->bge_flags & BGE_FLAG_PCIE) { 2818 uint16_t devctl; 2819 2820 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2821 uint32_t v; 2822 2823 DELAY(500000); /* wait for link training to complete */ 2824 v = pci_read_config(dev, 0xc4, 4); 2825 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2826 } 2827 2828 devctl = pci_read_config(dev, 2829 sc->bge_pciecap + PCIER_DEVCTRL, 2); 2830 2831 /* Disable no snoop and disable relaxed ordering. */ 2832 devctl &= ~(PCIEM_DEVCTL_RELAX_ORDER | PCIEM_DEVCTL_NOSNOOP); 2833 2834 /* Old PCI-E chips only support 128 bytes Max PayLoad Size. */ 2835 if ((sc->bge_flags & BGE_FLAG_CPMU) == 0) { 2836 devctl &= ~PCIEM_DEVCTL_MAX_PAYLOAD_MASK; 2837 devctl |= PCIEM_DEVCTL_MAX_PAYLOAD_128; 2838 } 2839 2840 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVCTRL, 2841 devctl, 2); 2842 2843 /* Clear error status. */ 2844 pci_write_config(dev, sc->bge_pciecap + PCIER_DEVSTS, 2845 PCIEM_DEVSTS_CORR_ERR | 2846 PCIEM_DEVSTS_NFATAL_ERR | 2847 PCIEM_DEVSTS_FATAL_ERR | 2848 PCIEM_DEVSTS_UNSUPP_REQ, 2); 2849 } 2850 2851 /* Reset some of the PCI state that got zapped by reset */ 2852 pci_write_config(dev, BGE_PCI_MISC_CTL, 2853 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2854 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW| 2855 sc->bge_pci_miscctl, 4); 2856 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2857 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2858 write_op(sc, BGE_MISC_CFG, (65 << 1)); 2859 2860 /* 2861 * Disable PCI-X relaxed ordering to ensure status block update 2862 * comes first then packet buffer DMA. Otherwise driver may 2863 * read stale status block. 2864 */ 2865 if (sc->bge_flags & BGE_FLAG_PCIX) { 2866 uint16_t devctl; 2867 2868 devctl = pci_read_config(dev, 2869 sc->bge_pcixcap + PCIXR_COMMAND, 2); 2870 devctl &= ~PCIXM_COMMAND_ERO; 2871 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { 2872 devctl &= ~PCIXM_COMMAND_MAX_READ; 2873 devctl |= PCIXM_COMMAND_MAX_READ_2048; 2874 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 2875 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS | 2876 PCIXM_COMMAND_MAX_READ); 2877 devctl |= PCIXM_COMMAND_MAX_READ_2048; 2878 } 2879 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, 2880 devctl, 2); 2881 } 2882 2883 /* 2884 * Enable memory arbiter and re-enable MSI if necessary. 2885 */ 2886 if (BGE_IS_5714_FAMILY(sc)) { 2887 uint32_t val; 2888 2889 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) { 2890 /* 2891 * Resetting BCM5714 family will clear MSI 2892 * enable bit; restore it after resetting. 2893 */ 2894 PCI_SETBIT(sc->bge_dev, sc->bge_msicap + PCIR_MSI_CTRL, 2895 PCIM_MSICTRL_MSI_ENABLE, 2); 2896 BGE_SETBIT(sc, BGE_MSI_MODE, BGE_MSIMODE_ENABLE); 2897 } 2898 val = CSR_READ_4(sc, BGE_MARB_MODE); 2899 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2900 } else { 2901 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2902 } 2903 2904 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 2905 for (i = 0; i < BGE_TIMEOUT; i++) { 2906 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 2907 if (val & BGE_VCPU_STATUS_INIT_DONE) 2908 break; 2909 DELAY(100); 2910 } 2911 if (i == BGE_TIMEOUT) { 2912 if_printf(&sc->arpcom.ac_if, "reset timed out\n"); 2913 return; 2914 } 2915 } else { 2916 /* 2917 * Poll until we see the 1's complement of the magic number. 2918 * This indicates that the firmware initialization 2919 * is complete. 2920 */ 2921 for (i = 0; i < BGE_FIRMWARE_TIMEOUT; i++) { 2922 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); 2923 if (val == ~BGE_SRAM_FW_MB_MAGIC) 2924 break; 2925 DELAY(10); 2926 } 2927 if (i == BGE_FIRMWARE_TIMEOUT) { 2928 if_printf(&sc->arpcom.ac_if, "firmware handshake " 2929 "timed out, found 0x%08x\n", val); 2930 } 2931 } 2932 2933 /* 2934 * XXX Wait for the value of the PCISTATE register to 2935 * return to its original pre-reset state. This is a 2936 * fairly good indicator of reset completion. If we don't 2937 * wait for the reset to fully complete, trying to read 2938 * from the device's non-PCI registers may yield garbage 2939 * results. 2940 */ 2941 for (i = 0; i < BGE_TIMEOUT; i++) { 2942 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2943 break; 2944 DELAY(10); 2945 } 2946 2947 /* Fix up byte swapping */ 2948 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 2949 BGE_MODECTL_BYTESWAP_DATA); 2950 2951 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2952 DELAY(40); 2953 2954 /* 2955 * The 5704 in TBI mode apparently needs some special 2956 * adjustment to insure the SERDES drive level is set 2957 * to 1.2V. 2958 */ 2959 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 2960 (sc->bge_flags & BGE_FLAG_TBI)) { 2961 uint32_t serdescfg; 2962 2963 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2964 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2965 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2966 } 2967 2968 /* XXX: Broadcom Linux driver. */ 2969 if ((sc->bge_flags & BGE_FLAG_PCIE) && 2970 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 2971 sc->bge_asicrev != BGE_ASICREV_BCM5785) { 2972 uint32_t v; 2973 2974 /* Enable Data FIFO protection. */ 2975 v = CSR_READ_4(sc, BGE_PCIE_TLDLPL_PORT); 2976 CSR_WRITE_4(sc, BGE_PCIE_TLDLPL_PORT, v | (1 << 25)); 2977 } 2978 2979 DELAY(10000); 2980 } 2981 2982 /* 2983 * Frame reception handling. This is called if there's a frame 2984 * on the receive return list. 2985 * 2986 * Note: we have to be able to handle two possibilities here: 2987 * 1) the frame is from the jumbo recieve ring 2988 * 2) the frame is from the standard receive ring 2989 */ 2990 2991 static void 2992 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int count) 2993 { 2994 struct ifnet *ifp; 2995 int stdcnt = 0, jumbocnt = 0; 2996 2997 ifp = &sc->arpcom.ac_if; 2998 2999 while (sc->bge_rx_saved_considx != rx_prod && count != 0) { 3000 struct bge_rx_bd *cur_rx; 3001 uint32_t rxidx; 3002 struct mbuf *m = NULL; 3003 uint16_t vlan_tag = 0; 3004 int have_tag = 0; 3005 3006 --count; 3007 3008 cur_rx = 3009 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 3010 3011 rxidx = cur_rx->bge_idx; 3012 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 3013 logif(rx_pkt); 3014 3015 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 3016 have_tag = 1; 3017 vlan_tag = cur_rx->bge_vlan_tag; 3018 } 3019 3020 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3021 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3022 jumbocnt++; 3023 3024 if (rxidx != sc->bge_jumbo) { 3025 IFNET_STAT_INC(ifp, ierrors, 1); 3026 if_printf(ifp, "sw jumbo index(%d) " 3027 "and hw jumbo index(%d) mismatch, drop!\n", 3028 sc->bge_jumbo, rxidx); 3029 bge_setup_rxdesc_jumbo(sc, rxidx); 3030 continue; 3031 } 3032 3033 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx].bge_mbuf; 3034 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3035 IFNET_STAT_INC(ifp, ierrors, 1); 3036 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo); 3037 continue; 3038 } 3039 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 0)) { 3040 IFNET_STAT_INC(ifp, ierrors, 1); 3041 bge_setup_rxdesc_jumbo(sc, sc->bge_jumbo); 3042 continue; 3043 } 3044 } else { 3045 int discard = 0; 3046 3047 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3048 stdcnt++; 3049 3050 if (rxidx != sc->bge_std) { 3051 IFNET_STAT_INC(ifp, ierrors, 1); 3052 if_printf(ifp, "sw std index(%d) " 3053 "and hw std index(%d) mismatch, drop!\n", 3054 sc->bge_std, rxidx); 3055 bge_setup_rxdesc_std(sc, rxidx); 3056 discard = 1; 3057 goto refresh_rx; 3058 } 3059 3060 m = sc->bge_cdata.bge_rx_std_chain[rxidx].bge_mbuf; 3061 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3062 IFNET_STAT_INC(ifp, ierrors, 1); 3063 bge_setup_rxdesc_std(sc, sc->bge_std); 3064 discard = 1; 3065 goto refresh_rx; 3066 } 3067 if (bge_newbuf_std(sc, sc->bge_std, 0)) { 3068 IFNET_STAT_INC(ifp, ierrors, 1); 3069 bge_setup_rxdesc_std(sc, sc->bge_std); 3070 discard = 1; 3071 } 3072 refresh_rx: 3073 if (sc->bge_rx_wreg > 0 && stdcnt >= sc->bge_rx_wreg) { 3074 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 3075 sc->bge_std); 3076 stdcnt = 0; 3077 } 3078 if (discard) 3079 continue; 3080 } 3081 3082 IFNET_STAT_INC(ifp, ipackets, 1); 3083 #if !defined(__i386__) && !defined(__x86_64__) 3084 /* 3085 * The x86 allows unaligned accesses, but for other 3086 * platforms we must make sure the payload is aligned. 3087 */ 3088 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 3089 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 3090 cur_rx->bge_len); 3091 m->m_data += ETHER_ALIGN; 3092 } 3093 #endif 3094 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3095 m->m_pkthdr.rcvif = ifp; 3096 3097 if (ifp->if_capenable & IFCAP_RXCSUM) { 3098 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 3099 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 3100 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 3101 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 3102 } 3103 if ((cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) && 3104 m->m_pkthdr.len >= BGE_MIN_FRAMELEN) { 3105 m->m_pkthdr.csum_data = 3106 cur_rx->bge_tcp_udp_csum; 3107 m->m_pkthdr.csum_flags |= 3108 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 3109 } 3110 } 3111 3112 /* 3113 * If we received a packet with a vlan tag, pass it 3114 * to vlan_input() instead of ether_input(). 3115 */ 3116 if (have_tag) { 3117 m->m_flags |= M_VLANTAG; 3118 m->m_pkthdr.ether_vlantag = vlan_tag; 3119 } 3120 ifp->if_input(ifp, m); 3121 } 3122 3123 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3124 if (stdcnt) 3125 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3126 if (jumbocnt) 3127 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3128 } 3129 3130 static void 3131 bge_txeof(struct bge_softc *sc, uint16_t tx_cons) 3132 { 3133 struct ifnet *ifp; 3134 3135 ifp = &sc->arpcom.ac_if; 3136 3137 /* 3138 * Go through our tx ring and free mbufs for those 3139 * frames that have been sent. 3140 */ 3141 while (sc->bge_tx_saved_considx != tx_cons) { 3142 uint32_t idx = 0; 3143 3144 idx = sc->bge_tx_saved_considx; 3145 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 3146 IFNET_STAT_INC(ifp, opackets, 1); 3147 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 3148 sc->bge_cdata.bge_tx_dmamap[idx]); 3149 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 3150 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3151 } 3152 sc->bge_txcnt--; 3153 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3154 logif(tx_pkt); 3155 } 3156 3157 if ((BGE_TX_RING_CNT - sc->bge_txcnt) >= 3158 (sc->bge_txrsvd + sc->bge_txspare)) 3159 ifq_clr_oactive(&ifp->if_snd); 3160 3161 if (sc->bge_txcnt == 0) 3162 ifp->if_timer = 0; 3163 3164 if (!ifq_is_empty(&ifp->if_snd)) 3165 if_devstart(ifp); 3166 } 3167 3168 #ifdef IFPOLL_ENABLE 3169 3170 static void 3171 bge_npoll_compat(struct ifnet *ifp, void *arg __unused, int cycles) 3172 { 3173 struct bge_softc *sc = ifp->if_softc; 3174 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3175 uint16_t rx_prod, tx_cons; 3176 3177 ASSERT_SERIALIZED(ifp->if_serializer); 3178 3179 if (sc->bge_npoll.ifpc_stcount-- == 0) { 3180 sc->bge_npoll.ifpc_stcount = sc->bge_npoll.ifpc_stfrac; 3181 /* 3182 * Process link state changes. 3183 */ 3184 bge_link_poll(sc); 3185 } 3186 3187 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) { 3188 sc->bge_status_tag = sblk->bge_status_tag; 3189 /* 3190 * Use a load fence to ensure that status_tag 3191 * is saved before rx_prod and tx_cons. 3192 */ 3193 cpu_lfence(); 3194 } 3195 3196 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3197 if (sc->bge_rx_saved_considx != rx_prod) 3198 bge_rxeof(sc, rx_prod, cycles); 3199 3200 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3201 if (sc->bge_tx_saved_considx != tx_cons) 3202 bge_txeof(sc, tx_cons); 3203 3204 if (sc->bge_flags & BGE_FLAG_STATUS_TAG) 3205 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24); 3206 3207 if (sc->bge_coal_chg) 3208 bge_coal_change(sc); 3209 } 3210 3211 static void 3212 bge_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3213 { 3214 struct bge_softc *sc = ifp->if_softc; 3215 3216 ASSERT_SERIALIZED(ifp->if_serializer); 3217 3218 if (info != NULL) { 3219 int cpuid = sc->bge_npoll.ifpc_cpuid; 3220 3221 info->ifpi_rx[cpuid].poll_func = bge_npoll_compat; 3222 info->ifpi_rx[cpuid].arg = NULL; 3223 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 3224 3225 if (ifp->if_flags & IFF_RUNNING) 3226 bge_disable_intr(sc); 3227 ifq_set_cpuid(&ifp->if_snd, cpuid); 3228 } else { 3229 if (ifp->if_flags & IFF_RUNNING) 3230 bge_enable_intr(sc); 3231 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->bge_irq)); 3232 } 3233 } 3234 3235 #endif /* IFPOLL_ENABLE */ 3236 3237 static void 3238 bge_intr_crippled(void *xsc) 3239 { 3240 struct bge_softc *sc = xsc; 3241 struct ifnet *ifp = &sc->arpcom.ac_if; 3242 3243 logif(intr); 3244 3245 /* 3246 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't 3247 * disable interrupts by writing nonzero like we used to, since with 3248 * our current organization this just gives complications and 3249 * pessimizations for re-enabling interrupts. We used to have races 3250 * instead of the necessary complications. Disabling interrupts 3251 * would just reduce the chance of a status update while we are 3252 * running (by switching to the interrupt-mode coalescence 3253 * parameters), but this chance is already very low so it is more 3254 * efficient to get another interrupt than prevent it. 3255 * 3256 * We do the ack first to ensure another interrupt if there is a 3257 * status update after the ack. We don't check for the status 3258 * changing later because it is more efficient to get another 3259 * interrupt than prevent it, not quite as above (not checking is 3260 * a smaller optimization than not toggling the interrupt enable, 3261 * since checking doesn't involve PCI accesses and toggling require 3262 * the status check). So toggling would probably be a pessimization 3263 * even with MSI. It would only be needed for using a task queue. 3264 */ 3265 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3266 3267 /* 3268 * Process link state changes. 3269 */ 3270 bge_link_poll(sc); 3271 3272 if (ifp->if_flags & IFF_RUNNING) { 3273 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3274 uint16_t rx_prod, tx_cons; 3275 3276 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3277 if (sc->bge_rx_saved_considx != rx_prod) 3278 bge_rxeof(sc, rx_prod, -1); 3279 3280 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3281 if (sc->bge_tx_saved_considx != tx_cons) 3282 bge_txeof(sc, tx_cons); 3283 } 3284 3285 if (sc->bge_coal_chg) 3286 bge_coal_change(sc); 3287 } 3288 3289 static void 3290 bge_intr_legacy(void *xsc) 3291 { 3292 struct bge_softc *sc = xsc; 3293 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3294 3295 if (sc->bge_status_tag == sblk->bge_status_tag) { 3296 uint32_t val; 3297 3298 val = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4); 3299 if (val & BGE_PCISTAT_INTR_NOTACT) 3300 return; 3301 } 3302 3303 /* 3304 * NOTE: 3305 * Interrupt will have to be disabled if tagged status 3306 * is used, else interrupt will always be asserted on 3307 * certain chips (at least on BCM5750 AX/BX). 3308 */ 3309 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3310 3311 bge_intr(sc); 3312 } 3313 3314 static void 3315 bge_msi(void *xsc) 3316 { 3317 struct bge_softc *sc = xsc; 3318 3319 /* Disable interrupt first */ 3320 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3321 bge_intr(sc); 3322 } 3323 3324 static void 3325 bge_msi_oneshot(void *xsc) 3326 { 3327 bge_intr(xsc); 3328 } 3329 3330 static void 3331 bge_intr(struct bge_softc *sc) 3332 { 3333 struct ifnet *ifp = &sc->arpcom.ac_if; 3334 struct bge_status_block *sblk = sc->bge_ldata.bge_status_block; 3335 uint16_t rx_prod, tx_cons; 3336 uint32_t status; 3337 3338 sc->bge_status_tag = sblk->bge_status_tag; 3339 /* 3340 * Use a load fence to ensure that status_tag is saved 3341 * before rx_prod, tx_cons and status. 3342 */ 3343 cpu_lfence(); 3344 3345 rx_prod = sblk->bge_idx[0].bge_rx_prod_idx; 3346 tx_cons = sblk->bge_idx[0].bge_tx_cons_idx; 3347 status = sblk->bge_status; 3348 3349 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) || sc->bge_link_evt) 3350 bge_link_poll(sc); 3351 3352 if (ifp->if_flags & IFF_RUNNING) { 3353 if (sc->bge_rx_saved_considx != rx_prod) 3354 bge_rxeof(sc, rx_prod, -1); 3355 3356 if (sc->bge_tx_saved_considx != tx_cons) 3357 bge_txeof(sc, tx_cons); 3358 } 3359 3360 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24); 3361 3362 if (sc->bge_coal_chg) 3363 bge_coal_change(sc); 3364 } 3365 3366 static void 3367 bge_tick(void *xsc) 3368 { 3369 struct bge_softc *sc = xsc; 3370 struct ifnet *ifp = &sc->arpcom.ac_if; 3371 3372 lwkt_serialize_enter(ifp->if_serializer); 3373 3374 if (BGE_IS_5705_PLUS(sc)) 3375 bge_stats_update_regs(sc); 3376 else 3377 bge_stats_update(sc); 3378 3379 if (sc->bge_flags & BGE_FLAG_TBI) { 3380 /* 3381 * Since in TBI mode auto-polling can't be used we should poll 3382 * link status manually. Here we register pending link event 3383 * and trigger interrupt. 3384 */ 3385 sc->bge_link_evt++; 3386 if (BGE_IS_CRIPPLED(sc)) 3387 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3388 else 3389 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3390 } else if (!sc->bge_link) { 3391 mii_tick(device_get_softc(sc->bge_miibus)); 3392 } 3393 3394 bge_asf_driver_up(sc); 3395 3396 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 3397 3398 lwkt_serialize_exit(ifp->if_serializer); 3399 } 3400 3401 static void 3402 bge_stats_update_regs(struct bge_softc *sc) 3403 { 3404 struct ifnet *ifp = &sc->arpcom.ac_if; 3405 struct bge_mac_stats_regs stats; 3406 uint32_t *s; 3407 int i; 3408 3409 s = (uint32_t *)&stats; 3410 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3411 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3412 s++; 3413 } 3414 3415 IFNET_STAT_SET(ifp, collisions, 3416 (stats.dot3StatsSingleCollisionFrames + 3417 stats.dot3StatsMultipleCollisionFrames + 3418 stats.dot3StatsExcessiveCollisions + 3419 stats.dot3StatsLateCollisions)); 3420 } 3421 3422 static void 3423 bge_stats_update(struct bge_softc *sc) 3424 { 3425 struct ifnet *ifp = &sc->arpcom.ac_if; 3426 bus_size_t stats; 3427 3428 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3429 3430 #define READ_STAT(sc, stats, stat) \ 3431 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3432 3433 IFNET_STAT_SET(ifp, collisions, 3434 (READ_STAT(sc, stats, 3435 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo) + 3436 READ_STAT(sc, stats, 3437 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3438 READ_STAT(sc, stats, 3439 txstats.dot3StatsExcessiveCollisions.bge_addr_lo) + 3440 READ_STAT(sc, stats, 3441 txstats.dot3StatsLateCollisions.bge_addr_lo))); 3442 3443 #undef READ_STAT 3444 3445 #ifdef notdef 3446 IFNET_STAT_SET(ifp, collisions, 3447 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3448 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3449 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3450 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions)); 3451 #endif 3452 } 3453 3454 /* 3455 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3456 * pointers to descriptors. 3457 */ 3458 static int 3459 bge_encap(struct bge_softc *sc, struct mbuf **m_head0, uint32_t *txidx, 3460 int *segs_used) 3461 { 3462 struct bge_tx_bd *d = NULL, *last_d; 3463 uint16_t csum_flags = 0, mss = 0; 3464 bus_dma_segment_t segs[BGE_NSEG_NEW]; 3465 bus_dmamap_t map; 3466 int error, maxsegs, nsegs, idx, i; 3467 struct mbuf *m_head = *m_head0, *m_new; 3468 3469 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3470 error = bge_setup_tso(sc, m_head0, &mss, &csum_flags); 3471 if (error) 3472 return ENOBUFS; 3473 m_head = *m_head0; 3474 } else if (m_head->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) { 3475 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3476 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3477 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 3478 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3479 if (m_head->m_flags & M_LASTFRAG) 3480 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3481 else if (m_head->m_flags & M_FRAG) 3482 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3483 } 3484 3485 idx = *txidx; 3486 map = sc->bge_cdata.bge_tx_dmamap[idx]; 3487 3488 maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - sc->bge_txrsvd; 3489 KASSERT(maxsegs >= sc->bge_txspare, 3490 ("not enough segments %d", maxsegs)); 3491 3492 if (maxsegs > BGE_NSEG_NEW) 3493 maxsegs = BGE_NSEG_NEW; 3494 3495 /* 3496 * Pad outbound frame to BGE_MIN_FRAMELEN for an unusual reason. 3497 * The bge hardware will pad out Tx runts to BGE_MIN_FRAMELEN, 3498 * but when such padded frames employ the bge IP/TCP checksum 3499 * offload, the hardware checksum assist gives incorrect results 3500 * (possibly from incorporating its own padding into the UDP/TCP 3501 * checksum; who knows). If we pad such runts with zeros, the 3502 * onboard checksum comes out correct. 3503 */ 3504 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) && 3505 m_head->m_pkthdr.len < BGE_MIN_FRAMELEN) { 3506 error = m_devpad(m_head, BGE_MIN_FRAMELEN); 3507 if (error) 3508 goto back; 3509 } 3510 3511 if ((sc->bge_flags & BGE_FLAG_SHORTDMA) && m_head->m_next != NULL) { 3512 m_new = bge_defrag_shortdma(m_head); 3513 if (m_new == NULL) { 3514 error = ENOBUFS; 3515 goto back; 3516 } 3517 *m_head0 = m_head = m_new; 3518 } 3519 if ((m_head->m_pkthdr.csum_flags & CSUM_TSO) == 0 && 3520 sc->bge_force_defrag && (sc->bge_flags & BGE_FLAG_PCIE) && 3521 m_head->m_next != NULL) { 3522 /* 3523 * Forcefully defragment mbuf chain to overcome hardware 3524 * limitation which only support a single outstanding 3525 * DMA read operation. If it fails, keep moving on using 3526 * the original mbuf chain. 3527 */ 3528 m_new = m_defrag(m_head, MB_DONTWAIT); 3529 if (m_new != NULL) 3530 *m_head0 = m_head = m_new; 3531 } 3532 3533 error = bus_dmamap_load_mbuf_defrag(sc->bge_cdata.bge_tx_mtag, map, 3534 m_head0, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3535 if (error) 3536 goto back; 3537 *segs_used += nsegs; 3538 3539 m_head = *m_head0; 3540 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3541 3542 for (i = 0; ; i++) { 3543 d = &sc->bge_ldata.bge_tx_ring[idx]; 3544 3545 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3546 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3547 d->bge_len = segs[i].ds_len; 3548 d->bge_flags = csum_flags; 3549 d->bge_mss = mss; 3550 3551 if (i == nsegs - 1) 3552 break; 3553 BGE_INC(idx, BGE_TX_RING_CNT); 3554 } 3555 last_d = d; 3556 3557 /* Set vlan tag to the first segment of the packet. */ 3558 d = &sc->bge_ldata.bge_tx_ring[*txidx]; 3559 if (m_head->m_flags & M_VLANTAG) { 3560 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3561 d->bge_vlan_tag = m_head->m_pkthdr.ether_vlantag; 3562 } else { 3563 d->bge_vlan_tag = 0; 3564 } 3565 3566 /* Mark the last segment as end of packet... */ 3567 last_d->bge_flags |= BGE_TXBDFLAG_END; 3568 3569 /* 3570 * Insure that the map for this transmission is placed at 3571 * the array index of the last descriptor in this chain. 3572 */ 3573 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 3574 sc->bge_cdata.bge_tx_dmamap[idx] = map; 3575 sc->bge_cdata.bge_tx_chain[idx] = m_head; 3576 sc->bge_txcnt += nsegs; 3577 3578 BGE_INC(idx, BGE_TX_RING_CNT); 3579 *txidx = idx; 3580 back: 3581 if (error) { 3582 m_freem(*m_head0); 3583 *m_head0 = NULL; 3584 } 3585 return error; 3586 } 3587 3588 static void 3589 bge_xmit(struct bge_softc *sc, uint32_t prodidx) 3590 { 3591 /* Transmit */ 3592 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3593 /* 5700 b2 errata */ 3594 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 3595 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3596 } 3597 3598 /* 3599 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3600 * to the mbuf data regions directly in the transmit descriptors. 3601 */ 3602 static void 3603 bge_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3604 { 3605 struct bge_softc *sc = ifp->if_softc; 3606 struct mbuf *m_head = NULL; 3607 uint32_t prodidx; 3608 int nsegs = 0; 3609 3610 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 3611 3612 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 3613 return; 3614 3615 prodidx = sc->bge_tx_prodidx; 3616 3617 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3618 m_head = ifq_dequeue(&ifp->if_snd); 3619 if (m_head == NULL) 3620 break; 3621 3622 /* 3623 * XXX 3624 * The code inside the if() block is never reached since we 3625 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3626 * requests to checksum TCP/UDP in a fragmented packet. 3627 * 3628 * XXX 3629 * safety overkill. If this is a fragmented packet chain 3630 * with delayed TCP/UDP checksums, then only encapsulate 3631 * it if we have enough descriptors to handle the entire 3632 * chain at once. 3633 * (paranoia -- may not actually be needed) 3634 */ 3635 if ((m_head->m_flags & M_FIRSTFRAG) && 3636 (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) { 3637 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3638 m_head->m_pkthdr.csum_data + sc->bge_txrsvd) { 3639 ifq_set_oactive(&ifp->if_snd); 3640 ifq_prepend(&ifp->if_snd, m_head); 3641 break; 3642 } 3643 } 3644 3645 /* 3646 * Sanity check: avoid coming within bge_txrsvd 3647 * descriptors of the end of the ring. Also make 3648 * sure there are bge_txspare descriptors for 3649 * jumbo buffers' defragmentation. 3650 */ 3651 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3652 (sc->bge_txrsvd + sc->bge_txspare)) { 3653 ifq_set_oactive(&ifp->if_snd); 3654 ifq_prepend(&ifp->if_snd, m_head); 3655 break; 3656 } 3657 3658 /* 3659 * Pack the data into the transmit ring. If we 3660 * don't have room, set the OACTIVE flag and wait 3661 * for the NIC to drain the ring. 3662 */ 3663 if (bge_encap(sc, &m_head, &prodidx, &nsegs)) { 3664 ifq_set_oactive(&ifp->if_snd); 3665 IFNET_STAT_INC(ifp, oerrors, 1); 3666 break; 3667 } 3668 3669 if (nsegs >= sc->bge_tx_wreg) { 3670 bge_xmit(sc, prodidx); 3671 nsegs = 0; 3672 } 3673 3674 ETHER_BPF_MTAP(ifp, m_head); 3675 3676 /* 3677 * Set a timeout in case the chip goes out to lunch. 3678 */ 3679 ifp->if_timer = 5; 3680 } 3681 3682 if (nsegs > 0) 3683 bge_xmit(sc, prodidx); 3684 sc->bge_tx_prodidx = prodidx; 3685 } 3686 3687 static void 3688 bge_init(void *xsc) 3689 { 3690 struct bge_softc *sc = xsc; 3691 struct ifnet *ifp = &sc->arpcom.ac_if; 3692 uint16_t *m; 3693 uint32_t mode; 3694 3695 ASSERT_SERIALIZED(ifp->if_serializer); 3696 3697 /* Cancel pending I/O and flush buffers. */ 3698 bge_stop(sc); 3699 3700 bge_stop_fw(sc); 3701 bge_sig_pre_reset(sc, BGE_RESET_START); 3702 bge_reset(sc); 3703 bge_sig_legacy(sc, BGE_RESET_START); 3704 bge_sig_post_reset(sc, BGE_RESET_START); 3705 3706 bge_chipinit(sc); 3707 3708 /* 3709 * Init the various state machines, ring 3710 * control blocks and firmware. 3711 */ 3712 if (bge_blockinit(sc)) { 3713 if_printf(ifp, "initialization failure\n"); 3714 bge_stop(sc); 3715 return; 3716 } 3717 3718 /* Specify MTU. */ 3719 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3720 ETHER_HDR_LEN + ETHER_CRC_LEN + EVL_ENCAPLEN); 3721 3722 /* Load our MAC address. */ 3723 m = (uint16_t *)&sc->arpcom.ac_enaddr[0]; 3724 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3725 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3726 3727 /* Enable or disable promiscuous mode as needed. */ 3728 bge_setpromisc(sc); 3729 3730 /* Program multicast filter. */ 3731 bge_setmulti(sc); 3732 3733 /* Init RX ring. */ 3734 if (bge_init_rx_ring_std(sc)) { 3735 if_printf(ifp, "RX ring initialization failed\n"); 3736 bge_stop(sc); 3737 return; 3738 } 3739 3740 /* 3741 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 3742 * memory to insure that the chip has in fact read the first 3743 * entry of the ring. 3744 */ 3745 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 3746 uint32_t v, i; 3747 for (i = 0; i < 10; i++) { 3748 DELAY(20); 3749 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 3750 if (v == (MCLBYTES - ETHER_ALIGN)) 3751 break; 3752 } 3753 if (i == 10) 3754 if_printf(ifp, "5705 A0 chip failed to load RX ring\n"); 3755 } 3756 3757 /* Init jumbo RX ring. */ 3758 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) { 3759 if (bge_init_rx_ring_jumbo(sc)) { 3760 if_printf(ifp, "Jumbo RX ring initialization failed\n"); 3761 bge_stop(sc); 3762 return; 3763 } 3764 } 3765 3766 /* Init our RX return ring index */ 3767 sc->bge_rx_saved_considx = 0; 3768 3769 /* Init TX ring. */ 3770 bge_init_tx_ring(sc); 3771 3772 /* Enable TX MAC state machine lockup fix. */ 3773 mode = CSR_READ_4(sc, BGE_TX_MODE); 3774 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) 3775 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 3776 /* Turn on transmitter */ 3777 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 3778 DELAY(100); 3779 3780 /* Turn on receiver */ 3781 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3782 DELAY(10); 3783 3784 /* 3785 * Set the number of good frames to receive after RX MBUF 3786 * Low Watermark has been reached. After the RX MAC receives 3787 * this number of frames, it will drop subsequent incoming 3788 * frames until the MBUF High Watermark is reached. 3789 */ 3790 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 3791 3792 if (sc->bge_irq_type == PCI_INTR_TYPE_MSI) { 3793 if (bootverbose) { 3794 if_printf(ifp, "MSI_MODE: %#x\n", 3795 CSR_READ_4(sc, BGE_MSI_MODE)); 3796 } 3797 3798 /* 3799 * XXX 3800 * Linux driver turns it on for all chips supporting MSI?! 3801 */ 3802 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 3803 /* 3804 * XXX 3805 * According to 5722-PG101-R, 3806 * BGE_PCIE_TRANSACT_ONESHOT_MSI applies only to 3807 * BCM5906. 3808 */ 3809 BGE_SETBIT(sc, BGE_PCIE_TRANSACT, 3810 BGE_PCIE_TRANSACT_ONESHOT_MSI); 3811 } 3812 } 3813 3814 /* Tell firmware we're alive. */ 3815 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3816 3817 /* Enable host interrupts if polling(4) is not enabled. */ 3818 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA, 4); 3819 #ifdef IFPOLL_ENABLE 3820 if (ifp->if_flags & IFF_NPOLLING) 3821 bge_disable_intr(sc); 3822 else 3823 #endif 3824 bge_enable_intr(sc); 3825 3826 ifp->if_flags |= IFF_RUNNING; 3827 ifq_clr_oactive(&ifp->if_snd); 3828 3829 bge_ifmedia_upd(ifp); 3830 3831 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc); 3832 } 3833 3834 /* 3835 * Set media options. 3836 */ 3837 static int 3838 bge_ifmedia_upd(struct ifnet *ifp) 3839 { 3840 struct bge_softc *sc = ifp->if_softc; 3841 3842 /* If this is a 1000baseX NIC, enable the TBI port. */ 3843 if (sc->bge_flags & BGE_FLAG_TBI) { 3844 struct ifmedia *ifm = &sc->bge_ifmedia; 3845 3846 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3847 return(EINVAL); 3848 3849 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3850 case IFM_AUTO: 3851 /* 3852 * The BCM5704 ASIC appears to have a special 3853 * mechanism for programming the autoneg 3854 * advertisement registers in TBI mode. 3855 */ 3856 if (!bge_fake_autoneg && 3857 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3858 uint32_t sgdig; 3859 3860 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 3861 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 3862 sgdig |= BGE_SGDIGCFG_AUTO | 3863 BGE_SGDIGCFG_PAUSE_CAP | 3864 BGE_SGDIGCFG_ASYM_PAUSE; 3865 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 3866 sgdig | BGE_SGDIGCFG_SEND); 3867 DELAY(5); 3868 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 3869 } 3870 break; 3871 case IFM_1000_SX: 3872 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3873 BGE_CLRBIT(sc, BGE_MAC_MODE, 3874 BGE_MACMODE_HALF_DUPLEX); 3875 } else { 3876 BGE_SETBIT(sc, BGE_MAC_MODE, 3877 BGE_MACMODE_HALF_DUPLEX); 3878 } 3879 DELAY(40); 3880 break; 3881 default: 3882 return(EINVAL); 3883 } 3884 } else { 3885 struct mii_data *mii = device_get_softc(sc->bge_miibus); 3886 3887 sc->bge_link_evt++; 3888 sc->bge_link = 0; 3889 if (mii->mii_instance) { 3890 struct mii_softc *miisc; 3891 3892 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 3893 mii_phy_reset(miisc); 3894 } 3895 mii_mediachg(mii); 3896 3897 /* 3898 * Force an interrupt so that we will call bge_link_upd 3899 * if needed and clear any pending link state attention. 3900 * Without this we are not getting any further interrupts 3901 * for link state changes and thus will not UP the link and 3902 * not be able to send in bge_start. The only way to get 3903 * things working was to receive a packet and get an RX 3904 * intr. 3905 * 3906 * bge_tick should help for fiber cards and we might not 3907 * need to do this here if BGE_FLAG_TBI is set but as 3908 * we poll for fiber anyway it should not harm. 3909 */ 3910 if (BGE_IS_CRIPPLED(sc)) 3911 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3912 else 3913 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 3914 } 3915 return(0); 3916 } 3917 3918 /* 3919 * Report current media status. 3920 */ 3921 static void 3922 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3923 { 3924 struct bge_softc *sc = ifp->if_softc; 3925 3926 if ((ifp->if_flags & IFF_RUNNING) == 0) 3927 return; 3928 3929 if (sc->bge_flags & BGE_FLAG_TBI) { 3930 ifmr->ifm_status = IFM_AVALID; 3931 ifmr->ifm_active = IFM_ETHER; 3932 if (CSR_READ_4(sc, BGE_MAC_STS) & 3933 BGE_MACSTAT_TBI_PCS_SYNCHED) { 3934 ifmr->ifm_status |= IFM_ACTIVE; 3935 } else { 3936 ifmr->ifm_active |= IFM_NONE; 3937 return; 3938 } 3939 3940 ifmr->ifm_active |= IFM_1000_SX; 3941 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3942 ifmr->ifm_active |= IFM_HDX; 3943 else 3944 ifmr->ifm_active |= IFM_FDX; 3945 } else { 3946 struct mii_data *mii = device_get_softc(sc->bge_miibus); 3947 3948 mii_pollstat(mii); 3949 ifmr->ifm_active = mii->mii_media_active; 3950 ifmr->ifm_status = mii->mii_media_status; 3951 } 3952 } 3953 3954 static int 3955 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 3956 { 3957 struct bge_softc *sc = ifp->if_softc; 3958 struct ifreq *ifr = (struct ifreq *)data; 3959 int mask, error = 0; 3960 3961 ASSERT_SERIALIZED(ifp->if_serializer); 3962 3963 switch (command) { 3964 case SIOCSIFMTU: 3965 if ((!BGE_IS_JUMBO_CAPABLE(sc) && ifr->ifr_mtu > ETHERMTU) || 3966 (BGE_IS_JUMBO_CAPABLE(sc) && 3967 ifr->ifr_mtu > BGE_JUMBO_MTU)) { 3968 error = EINVAL; 3969 } else if (ifp->if_mtu != ifr->ifr_mtu) { 3970 ifp->if_mtu = ifr->ifr_mtu; 3971 if (ifp->if_flags & IFF_RUNNING) 3972 bge_init(sc); 3973 } 3974 break; 3975 case SIOCSIFFLAGS: 3976 if (ifp->if_flags & IFF_UP) { 3977 if (ifp->if_flags & IFF_RUNNING) { 3978 mask = ifp->if_flags ^ sc->bge_if_flags; 3979 3980 /* 3981 * If only the state of the PROMISC flag 3982 * changed, then just use the 'set promisc 3983 * mode' command instead of reinitializing 3984 * the entire NIC. Doing a full re-init 3985 * means reloading the firmware and waiting 3986 * for it to start up, which may take a 3987 * second or two. Similarly for ALLMULTI. 3988 */ 3989 if (mask & IFF_PROMISC) 3990 bge_setpromisc(sc); 3991 if (mask & IFF_ALLMULTI) 3992 bge_setmulti(sc); 3993 } else { 3994 bge_init(sc); 3995 } 3996 } else if (ifp->if_flags & IFF_RUNNING) { 3997 bge_stop(sc); 3998 } 3999 sc->bge_if_flags = ifp->if_flags; 4000 break; 4001 case SIOCADDMULTI: 4002 case SIOCDELMULTI: 4003 if (ifp->if_flags & IFF_RUNNING) 4004 bge_setmulti(sc); 4005 break; 4006 case SIOCSIFMEDIA: 4007 case SIOCGIFMEDIA: 4008 if (sc->bge_flags & BGE_FLAG_TBI) { 4009 error = ifmedia_ioctl(ifp, ifr, 4010 &sc->bge_ifmedia, command); 4011 } else { 4012 struct mii_data *mii; 4013 4014 mii = device_get_softc(sc->bge_miibus); 4015 error = ifmedia_ioctl(ifp, ifr, 4016 &mii->mii_media, command); 4017 } 4018 break; 4019 case SIOCSIFCAP: 4020 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 4021 if (mask & IFCAP_HWCSUM) { 4022 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 4023 if (ifp->if_capenable & IFCAP_TXCSUM) 4024 ifp->if_hwassist |= BGE_CSUM_FEATURES; 4025 else 4026 ifp->if_hwassist &= ~BGE_CSUM_FEATURES; 4027 } 4028 if (mask & IFCAP_TSO) { 4029 ifp->if_capenable ^= IFCAP_TSO; 4030 if (ifp->if_capenable & IFCAP_TSO) 4031 ifp->if_hwassist |= CSUM_TSO; 4032 else 4033 ifp->if_hwassist &= ~CSUM_TSO; 4034 } 4035 break; 4036 default: 4037 error = ether_ioctl(ifp, command, data); 4038 break; 4039 } 4040 return error; 4041 } 4042 4043 static void 4044 bge_watchdog(struct ifnet *ifp) 4045 { 4046 struct bge_softc *sc = ifp->if_softc; 4047 4048 if_printf(ifp, "watchdog timeout -- resetting\n"); 4049 4050 bge_init(sc); 4051 4052 IFNET_STAT_INC(ifp, oerrors, 1); 4053 4054 if (!ifq_is_empty(&ifp->if_snd)) 4055 if_devstart(ifp); 4056 } 4057 4058 /* 4059 * Stop the adapter and free any mbufs allocated to the 4060 * RX and TX lists. 4061 */ 4062 static void 4063 bge_stop(struct bge_softc *sc) 4064 { 4065 struct ifnet *ifp = &sc->arpcom.ac_if; 4066 4067 ASSERT_SERIALIZED(ifp->if_serializer); 4068 4069 callout_stop(&sc->bge_stat_timer); 4070 4071 /* Disable host interrupts. */ 4072 bge_disable_intr(sc); 4073 4074 /* 4075 * Tell firmware we're shutting down. 4076 */ 4077 bge_stop_fw(sc); 4078 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 4079 4080 /* 4081 * Disable all of the receiver blocks 4082 */ 4083 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4084 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4085 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4086 if (BGE_IS_5700_FAMILY(sc)) 4087 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4088 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4089 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4090 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4091 4092 /* 4093 * Disable all of the transmit blocks 4094 */ 4095 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4096 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4097 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4098 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4099 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4100 if (BGE_IS_5700_FAMILY(sc)) 4101 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4102 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4103 4104 /* 4105 * Shut down all of the memory managers and related 4106 * state machines. 4107 */ 4108 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4109 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4110 if (BGE_IS_5700_FAMILY(sc)) 4111 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4112 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4113 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4114 if (!BGE_IS_5705_PLUS(sc)) { 4115 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4116 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4117 } 4118 4119 bge_reset(sc); 4120 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 4121 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 4122 4123 /* 4124 * Keep the ASF firmware running if up. 4125 */ 4126 if (sc->bge_asf_mode & ASF_STACKUP) 4127 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4128 else 4129 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4130 4131 /* Free the RX lists. */ 4132 bge_free_rx_ring_std(sc); 4133 4134 /* Free jumbo RX list. */ 4135 if (BGE_IS_JUMBO_CAPABLE(sc)) 4136 bge_free_rx_ring_jumbo(sc); 4137 4138 /* Free TX buffers. */ 4139 bge_free_tx_ring(sc); 4140 4141 sc->bge_status_tag = 0; 4142 sc->bge_link = 0; 4143 sc->bge_coal_chg = 0; 4144 4145 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4146 4147 ifp->if_flags &= ~IFF_RUNNING; 4148 ifq_clr_oactive(&ifp->if_snd); 4149 ifp->if_timer = 0; 4150 } 4151 4152 /* 4153 * Stop all chip I/O so that the kernel's probe routines don't 4154 * get confused by errant DMAs when rebooting. 4155 */ 4156 static void 4157 bge_shutdown(device_t dev) 4158 { 4159 struct bge_softc *sc = device_get_softc(dev); 4160 struct ifnet *ifp = &sc->arpcom.ac_if; 4161 4162 lwkt_serialize_enter(ifp->if_serializer); 4163 bge_stop(sc); 4164 lwkt_serialize_exit(ifp->if_serializer); 4165 } 4166 4167 static int 4168 bge_suspend(device_t dev) 4169 { 4170 struct bge_softc *sc = device_get_softc(dev); 4171 struct ifnet *ifp = &sc->arpcom.ac_if; 4172 4173 lwkt_serialize_enter(ifp->if_serializer); 4174 bge_stop(sc); 4175 lwkt_serialize_exit(ifp->if_serializer); 4176 4177 return 0; 4178 } 4179 4180 static int 4181 bge_resume(device_t dev) 4182 { 4183 struct bge_softc *sc = device_get_softc(dev); 4184 struct ifnet *ifp = &sc->arpcom.ac_if; 4185 4186 lwkt_serialize_enter(ifp->if_serializer); 4187 4188 if (ifp->if_flags & IFF_UP) { 4189 bge_init(sc); 4190 4191 if (!ifq_is_empty(&ifp->if_snd)) 4192 if_devstart(ifp); 4193 } 4194 4195 lwkt_serialize_exit(ifp->if_serializer); 4196 4197 return 0; 4198 } 4199 4200 static void 4201 bge_setpromisc(struct bge_softc *sc) 4202 { 4203 struct ifnet *ifp = &sc->arpcom.ac_if; 4204 4205 if (ifp->if_flags & IFF_PROMISC) 4206 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4207 else 4208 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4209 } 4210 4211 static void 4212 bge_dma_free(struct bge_softc *sc) 4213 { 4214 int i; 4215 4216 /* Destroy RX mbuf DMA stuffs. */ 4217 if (sc->bge_cdata.bge_rx_mtag != NULL) { 4218 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4219 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 4220 sc->bge_cdata.bge_rx_std_dmamap[i]); 4221 } 4222 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 4223 sc->bge_cdata.bge_rx_tmpmap); 4224 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 4225 } 4226 4227 /* Destroy TX mbuf DMA stuffs. */ 4228 if (sc->bge_cdata.bge_tx_mtag != NULL) { 4229 for (i = 0; i < BGE_TX_RING_CNT; i++) { 4230 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 4231 sc->bge_cdata.bge_tx_dmamap[i]); 4232 } 4233 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 4234 } 4235 4236 /* Destroy standard RX ring */ 4237 bge_dma_block_free(sc->bge_cdata.bge_rx_std_ring_tag, 4238 sc->bge_cdata.bge_rx_std_ring_map, 4239 sc->bge_ldata.bge_rx_std_ring); 4240 4241 if (BGE_IS_JUMBO_CAPABLE(sc)) 4242 bge_free_jumbo_mem(sc); 4243 4244 /* Destroy RX return ring */ 4245 bge_dma_block_free(sc->bge_cdata.bge_rx_return_ring_tag, 4246 sc->bge_cdata.bge_rx_return_ring_map, 4247 sc->bge_ldata.bge_rx_return_ring); 4248 4249 /* Destroy TX ring */ 4250 bge_dma_block_free(sc->bge_cdata.bge_tx_ring_tag, 4251 sc->bge_cdata.bge_tx_ring_map, 4252 sc->bge_ldata.bge_tx_ring); 4253 4254 /* Destroy status block */ 4255 bge_dma_block_free(sc->bge_cdata.bge_status_tag, 4256 sc->bge_cdata.bge_status_map, 4257 sc->bge_ldata.bge_status_block); 4258 4259 /* Destroy statistics block */ 4260 bge_dma_block_free(sc->bge_cdata.bge_stats_tag, 4261 sc->bge_cdata.bge_stats_map, 4262 sc->bge_ldata.bge_stats); 4263 4264 /* Destroy the parent tag */ 4265 if (sc->bge_cdata.bge_parent_tag != NULL) 4266 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 4267 } 4268 4269 static int 4270 bge_dma_alloc(struct bge_softc *sc) 4271 { 4272 struct ifnet *ifp = &sc->arpcom.ac_if; 4273 int i, error; 4274 bus_addr_t lowaddr; 4275 bus_size_t txmaxsz; 4276 4277 lowaddr = BUS_SPACE_MAXADDR; 4278 if (sc->bge_flags & BGE_FLAG_MAXADDR_40BIT) 4279 lowaddr = BGE_DMA_MAXADDR_40BIT; 4280 4281 /* 4282 * Allocate the parent bus DMA tag appropriate for PCI. 4283 * 4284 * All of the NetExtreme/NetLink controllers have 4GB boundary 4285 * DMA bug. 4286 * Whenever an address crosses a multiple of the 4GB boundary 4287 * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition 4288 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 4289 * state machine will lockup and cause the device to hang. 4290 */ 4291 error = bus_dma_tag_create(NULL, 1, BGE_DMA_BOUNDARY_4G, 4292 lowaddr, BUS_SPACE_MAXADDR, 4293 NULL, NULL, 4294 BUS_SPACE_MAXSIZE_32BIT, 0, 4295 BUS_SPACE_MAXSIZE_32BIT, 4296 0, &sc->bge_cdata.bge_parent_tag); 4297 if (error) { 4298 if_printf(ifp, "could not allocate parent dma tag\n"); 4299 return error; 4300 } 4301 4302 /* 4303 * Create DMA tag and maps for RX mbufs. 4304 */ 4305 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 4306 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4307 NULL, NULL, MCLBYTES, 1, MCLBYTES, 4308 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 4309 &sc->bge_cdata.bge_rx_mtag); 4310 if (error) { 4311 if_printf(ifp, "could not allocate RX mbuf dma tag\n"); 4312 return error; 4313 } 4314 4315 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 4316 BUS_DMA_WAITOK, &sc->bge_cdata.bge_rx_tmpmap); 4317 if (error) { 4318 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 4319 sc->bge_cdata.bge_rx_mtag = NULL; 4320 return error; 4321 } 4322 4323 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 4324 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 4325 BUS_DMA_WAITOK, 4326 &sc->bge_cdata.bge_rx_std_dmamap[i]); 4327 if (error) { 4328 int j; 4329 4330 for (j = 0; j < i; ++j) { 4331 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 4332 sc->bge_cdata.bge_rx_std_dmamap[j]); 4333 } 4334 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 4335 sc->bge_cdata.bge_rx_mtag = NULL; 4336 4337 if_printf(ifp, "could not create DMA map for RX\n"); 4338 return error; 4339 } 4340 } 4341 4342 /* 4343 * Create DMA tag and maps for TX mbufs. 4344 */ 4345 if (sc->bge_flags & BGE_FLAG_TSO) 4346 txmaxsz = IP_MAXPACKET + sizeof(struct ether_vlan_header); 4347 else 4348 txmaxsz = BGE_JUMBO_FRAMELEN; 4349 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 4350 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4351 NULL, NULL, 4352 txmaxsz, BGE_NSEG_NEW, PAGE_SIZE, 4353 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 4354 BUS_DMA_ONEBPAGE, 4355 &sc->bge_cdata.bge_tx_mtag); 4356 if (error) { 4357 if_printf(ifp, "could not allocate TX mbuf dma tag\n"); 4358 return error; 4359 } 4360 4361 for (i = 0; i < BGE_TX_RING_CNT; i++) { 4362 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 4363 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 4364 &sc->bge_cdata.bge_tx_dmamap[i]); 4365 if (error) { 4366 int j; 4367 4368 for (j = 0; j < i; ++j) { 4369 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 4370 sc->bge_cdata.bge_tx_dmamap[j]); 4371 } 4372 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 4373 sc->bge_cdata.bge_tx_mtag = NULL; 4374 4375 if_printf(ifp, "could not create DMA map for TX\n"); 4376 return error; 4377 } 4378 } 4379 4380 /* 4381 * Create DMA stuffs for standard RX ring. 4382 */ 4383 error = bge_dma_block_alloc(sc, BGE_STD_RX_RING_SZ, 4384 &sc->bge_cdata.bge_rx_std_ring_tag, 4385 &sc->bge_cdata.bge_rx_std_ring_map, 4386 (void *)&sc->bge_ldata.bge_rx_std_ring, 4387 &sc->bge_ldata.bge_rx_std_ring_paddr); 4388 if (error) { 4389 if_printf(ifp, "could not create std RX ring\n"); 4390 return error; 4391 } 4392 4393 /* 4394 * Create jumbo buffer pool. 4395 */ 4396 if (BGE_IS_JUMBO_CAPABLE(sc)) { 4397 error = bge_alloc_jumbo_mem(sc); 4398 if (error) { 4399 if_printf(ifp, "could not create jumbo buffer pool\n"); 4400 return error; 4401 } 4402 } 4403 4404 /* 4405 * Create DMA stuffs for RX return ring. 4406 */ 4407 error = bge_dma_block_alloc(sc, 4408 BGE_RX_RTN_RING_SZ(sc->bge_return_ring_cnt), 4409 &sc->bge_cdata.bge_rx_return_ring_tag, 4410 &sc->bge_cdata.bge_rx_return_ring_map, 4411 (void *)&sc->bge_ldata.bge_rx_return_ring, 4412 &sc->bge_ldata.bge_rx_return_ring_paddr); 4413 if (error) { 4414 if_printf(ifp, "could not create RX ret ring\n"); 4415 return error; 4416 } 4417 4418 /* 4419 * Create DMA stuffs for TX ring. 4420 */ 4421 error = bge_dma_block_alloc(sc, BGE_TX_RING_SZ, 4422 &sc->bge_cdata.bge_tx_ring_tag, 4423 &sc->bge_cdata.bge_tx_ring_map, 4424 (void *)&sc->bge_ldata.bge_tx_ring, 4425 &sc->bge_ldata.bge_tx_ring_paddr); 4426 if (error) { 4427 if_printf(ifp, "could not create TX ring\n"); 4428 return error; 4429 } 4430 4431 /* 4432 * Create DMA stuffs for status block. 4433 */ 4434 error = bge_dma_block_alloc(sc, BGE_STATUS_BLK_SZ, 4435 &sc->bge_cdata.bge_status_tag, 4436 &sc->bge_cdata.bge_status_map, 4437 (void *)&sc->bge_ldata.bge_status_block, 4438 &sc->bge_ldata.bge_status_block_paddr); 4439 if (error) { 4440 if_printf(ifp, "could not create status block\n"); 4441 return error; 4442 } 4443 4444 /* 4445 * Create DMA stuffs for statistics block. 4446 */ 4447 error = bge_dma_block_alloc(sc, BGE_STATS_SZ, 4448 &sc->bge_cdata.bge_stats_tag, 4449 &sc->bge_cdata.bge_stats_map, 4450 (void *)&sc->bge_ldata.bge_stats, 4451 &sc->bge_ldata.bge_stats_paddr); 4452 if (error) { 4453 if_printf(ifp, "could not create stats block\n"); 4454 return error; 4455 } 4456 return 0; 4457 } 4458 4459 static int 4460 bge_dma_block_alloc(struct bge_softc *sc, bus_size_t size, bus_dma_tag_t *tag, 4461 bus_dmamap_t *map, void **addr, bus_addr_t *paddr) 4462 { 4463 bus_dmamem_t dmem; 4464 int error; 4465 4466 error = bus_dmamem_coherent(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, 4467 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 4468 size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 4469 if (error) 4470 return error; 4471 4472 *tag = dmem.dmem_tag; 4473 *map = dmem.dmem_map; 4474 *addr = dmem.dmem_addr; 4475 *paddr = dmem.dmem_busaddr; 4476 4477 return 0; 4478 } 4479 4480 static void 4481 bge_dma_block_free(bus_dma_tag_t tag, bus_dmamap_t map, void *addr) 4482 { 4483 if (tag != NULL) { 4484 bus_dmamap_unload(tag, map); 4485 bus_dmamem_free(tag, addr, map); 4486 bus_dma_tag_destroy(tag); 4487 } 4488 } 4489 4490 /* 4491 * Grrr. The link status word in the status block does 4492 * not work correctly on the BCM5700 rev AX and BX chips, 4493 * according to all available information. Hence, we have 4494 * to enable MII interrupts in order to properly obtain 4495 * async link changes. Unfortunately, this also means that 4496 * we have to read the MAC status register to detect link 4497 * changes, thereby adding an additional register access to 4498 * the interrupt handler. 4499 * 4500 * XXX: perhaps link state detection procedure used for 4501 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. 4502 */ 4503 static void 4504 bge_bcm5700_link_upd(struct bge_softc *sc, uint32_t status __unused) 4505 { 4506 struct ifnet *ifp = &sc->arpcom.ac_if; 4507 struct mii_data *mii = device_get_softc(sc->bge_miibus); 4508 4509 mii_pollstat(mii); 4510 4511 if (!sc->bge_link && 4512 (mii->mii_media_status & IFM_ACTIVE) && 4513 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4514 sc->bge_link++; 4515 if (bootverbose) 4516 if_printf(ifp, "link UP\n"); 4517 } else if (sc->bge_link && 4518 (!(mii->mii_media_status & IFM_ACTIVE) || 4519 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4520 sc->bge_link = 0; 4521 if (bootverbose) 4522 if_printf(ifp, "link DOWN\n"); 4523 } 4524 4525 /* Clear the interrupt. */ 4526 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); 4527 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4528 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS); 4529 } 4530 4531 static void 4532 bge_tbi_link_upd(struct bge_softc *sc, uint32_t status) 4533 { 4534 struct ifnet *ifp = &sc->arpcom.ac_if; 4535 4536 #define PCS_ENCODE_ERR (BGE_MACSTAT_PORT_DECODE_ERROR|BGE_MACSTAT_MI_COMPLETE) 4537 4538 /* 4539 * Sometimes PCS encoding errors are detected in 4540 * TBI mode (on fiber NICs), and for some reason 4541 * the chip will signal them as link changes. 4542 * If we get a link change event, but the 'PCS 4543 * encoding error' bit in the MAC status register 4544 * is set, don't bother doing a link check. 4545 * This avoids spurious "gigabit link up" messages 4546 * that sometimes appear on fiber NICs during 4547 * periods of heavy traffic. 4548 */ 4549 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4550 if (!sc->bge_link) { 4551 sc->bge_link++; 4552 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 4553 BGE_CLRBIT(sc, BGE_MAC_MODE, 4554 BGE_MACMODE_TBI_SEND_CFGS); 4555 DELAY(40); 4556 } 4557 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4558 4559 if (bootverbose) 4560 if_printf(ifp, "link UP\n"); 4561 4562 ifp->if_link_state = LINK_STATE_UP; 4563 if_link_state_change(ifp); 4564 } 4565 } else if ((status & PCS_ENCODE_ERR) != PCS_ENCODE_ERR) { 4566 if (sc->bge_link) { 4567 sc->bge_link = 0; 4568 4569 if (bootverbose) 4570 if_printf(ifp, "link DOWN\n"); 4571 4572 ifp->if_link_state = LINK_STATE_DOWN; 4573 if_link_state_change(ifp); 4574 } 4575 } 4576 4577 #undef PCS_ENCODE_ERR 4578 4579 /* Clear the attention. */ 4580 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4581 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4582 BGE_MACSTAT_LINK_CHANGED); 4583 } 4584 4585 static void 4586 bge_copper_link_upd(struct bge_softc *sc, uint32_t status __unused) 4587 { 4588 struct ifnet *ifp = &sc->arpcom.ac_if; 4589 struct mii_data *mii = device_get_softc(sc->bge_miibus); 4590 4591 mii_pollstat(mii); 4592 bge_miibus_statchg(sc->bge_dev); 4593 4594 if (bootverbose) { 4595 if (sc->bge_link) 4596 if_printf(ifp, "link UP\n"); 4597 else 4598 if_printf(ifp, "link DOWN\n"); 4599 } 4600 4601 /* Clear the attention. */ 4602 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4603 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4604 BGE_MACSTAT_LINK_CHANGED); 4605 } 4606 4607 static void 4608 bge_autopoll_link_upd(struct bge_softc *sc, uint32_t status __unused) 4609 { 4610 struct ifnet *ifp = &sc->arpcom.ac_if; 4611 struct mii_data *mii = device_get_softc(sc->bge_miibus); 4612 4613 mii_pollstat(mii); 4614 4615 if (!sc->bge_link && 4616 (mii->mii_media_status & IFM_ACTIVE) && 4617 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4618 sc->bge_link++; 4619 if (bootverbose) 4620 if_printf(ifp, "link UP\n"); 4621 } else if (sc->bge_link && 4622 (!(mii->mii_media_status & IFM_ACTIVE) || 4623 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4624 sc->bge_link = 0; 4625 if (bootverbose) 4626 if_printf(ifp, "link DOWN\n"); 4627 } 4628 4629 /* Clear the attention. */ 4630 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4631 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4632 BGE_MACSTAT_LINK_CHANGED); 4633 } 4634 4635 static int 4636 bge_sysctl_rx_coal_ticks(SYSCTL_HANDLER_ARGS) 4637 { 4638 struct bge_softc *sc = arg1; 4639 4640 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4641 &sc->bge_rx_coal_ticks, 4642 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX, 4643 BGE_RX_COAL_TICKS_CHG); 4644 } 4645 4646 static int 4647 bge_sysctl_tx_coal_ticks(SYSCTL_HANDLER_ARGS) 4648 { 4649 struct bge_softc *sc = arg1; 4650 4651 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4652 &sc->bge_tx_coal_ticks, 4653 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX, 4654 BGE_TX_COAL_TICKS_CHG); 4655 } 4656 4657 static int 4658 bge_sysctl_rx_coal_bds(SYSCTL_HANDLER_ARGS) 4659 { 4660 struct bge_softc *sc = arg1; 4661 4662 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4663 &sc->bge_rx_coal_bds, 4664 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX, 4665 BGE_RX_COAL_BDS_CHG); 4666 } 4667 4668 static int 4669 bge_sysctl_tx_coal_bds(SYSCTL_HANDLER_ARGS) 4670 { 4671 struct bge_softc *sc = arg1; 4672 4673 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4674 &sc->bge_tx_coal_bds, 4675 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX, 4676 BGE_TX_COAL_BDS_CHG); 4677 } 4678 4679 static int 4680 bge_sysctl_rx_coal_ticks_int(SYSCTL_HANDLER_ARGS) 4681 { 4682 struct bge_softc *sc = arg1; 4683 4684 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4685 &sc->bge_rx_coal_ticks_int, 4686 BGE_RX_COAL_TICKS_MIN, BGE_RX_COAL_TICKS_MAX, 4687 BGE_RX_COAL_TICKS_INT_CHG); 4688 } 4689 4690 static int 4691 bge_sysctl_tx_coal_ticks_int(SYSCTL_HANDLER_ARGS) 4692 { 4693 struct bge_softc *sc = arg1; 4694 4695 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4696 &sc->bge_tx_coal_ticks_int, 4697 BGE_TX_COAL_TICKS_MIN, BGE_TX_COAL_TICKS_MAX, 4698 BGE_TX_COAL_TICKS_INT_CHG); 4699 } 4700 4701 static int 4702 bge_sysctl_rx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4703 { 4704 struct bge_softc *sc = arg1; 4705 4706 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4707 &sc->bge_rx_coal_bds_int, 4708 BGE_RX_COAL_BDS_MIN, BGE_RX_COAL_BDS_MAX, 4709 BGE_RX_COAL_BDS_INT_CHG); 4710 } 4711 4712 static int 4713 bge_sysctl_tx_coal_bds_int(SYSCTL_HANDLER_ARGS) 4714 { 4715 struct bge_softc *sc = arg1; 4716 4717 return bge_sysctl_coal_chg(oidp, arg1, arg2, req, 4718 &sc->bge_tx_coal_bds_int, 4719 BGE_TX_COAL_BDS_MIN, BGE_TX_COAL_BDS_MAX, 4720 BGE_TX_COAL_BDS_INT_CHG); 4721 } 4722 4723 static int 4724 bge_sysctl_coal_chg(SYSCTL_HANDLER_ARGS, uint32_t *coal, 4725 int coal_min, int coal_max, uint32_t coal_chg_mask) 4726 { 4727 struct bge_softc *sc = arg1; 4728 struct ifnet *ifp = &sc->arpcom.ac_if; 4729 int error = 0, v; 4730 4731 lwkt_serialize_enter(ifp->if_serializer); 4732 4733 v = *coal; 4734 error = sysctl_handle_int(oidp, &v, 0, req); 4735 if (!error && req->newptr != NULL) { 4736 if (v < coal_min || v > coal_max) { 4737 error = EINVAL; 4738 } else { 4739 *coal = v; 4740 sc->bge_coal_chg |= coal_chg_mask; 4741 } 4742 } 4743 4744 lwkt_serialize_exit(ifp->if_serializer); 4745 return error; 4746 } 4747 4748 static void 4749 bge_coal_change(struct bge_softc *sc) 4750 { 4751 struct ifnet *ifp = &sc->arpcom.ac_if; 4752 4753 ASSERT_SERIALIZED(ifp->if_serializer); 4754 4755 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_CHG) { 4756 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 4757 sc->bge_rx_coal_ticks); 4758 DELAY(10); 4759 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 4760 4761 if (bootverbose) { 4762 if_printf(ifp, "rx_coal_ticks -> %u\n", 4763 sc->bge_rx_coal_ticks); 4764 } 4765 } 4766 4767 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_CHG) { 4768 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 4769 sc->bge_tx_coal_ticks); 4770 DELAY(10); 4771 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS); 4772 4773 if (bootverbose) { 4774 if_printf(ifp, "tx_coal_ticks -> %u\n", 4775 sc->bge_tx_coal_ticks); 4776 } 4777 } 4778 4779 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_CHG) { 4780 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 4781 sc->bge_rx_coal_bds); 4782 DELAY(10); 4783 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 4784 4785 if (bootverbose) { 4786 if_printf(ifp, "rx_coal_bds -> %u\n", 4787 sc->bge_rx_coal_bds); 4788 } 4789 } 4790 4791 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_CHG) { 4792 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 4793 sc->bge_tx_coal_bds); 4794 DELAY(10); 4795 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS); 4796 4797 if (bootverbose) { 4798 if_printf(ifp, "tx_max_coal_bds -> %u\n", 4799 sc->bge_tx_coal_bds); 4800 } 4801 } 4802 4803 if (sc->bge_coal_chg & BGE_RX_COAL_TICKS_INT_CHG) { 4804 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 4805 sc->bge_rx_coal_ticks_int); 4806 DELAY(10); 4807 CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS_INT); 4808 4809 if (bootverbose) { 4810 if_printf(ifp, "rx_coal_ticks_int -> %u\n", 4811 sc->bge_rx_coal_ticks_int); 4812 } 4813 } 4814 4815 if (sc->bge_coal_chg & BGE_TX_COAL_TICKS_INT_CHG) { 4816 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 4817 sc->bge_tx_coal_ticks_int); 4818 DELAY(10); 4819 CSR_READ_4(sc, BGE_HCC_TX_COAL_TICKS_INT); 4820 4821 if (bootverbose) { 4822 if_printf(ifp, "tx_coal_ticks_int -> %u\n", 4823 sc->bge_tx_coal_ticks_int); 4824 } 4825 } 4826 4827 if (sc->bge_coal_chg & BGE_RX_COAL_BDS_INT_CHG) { 4828 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 4829 sc->bge_rx_coal_bds_int); 4830 DELAY(10); 4831 CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT); 4832 4833 if (bootverbose) { 4834 if_printf(ifp, "rx_coal_bds_int -> %u\n", 4835 sc->bge_rx_coal_bds_int); 4836 } 4837 } 4838 4839 if (sc->bge_coal_chg & BGE_TX_COAL_BDS_INT_CHG) { 4840 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 4841 sc->bge_tx_coal_bds_int); 4842 DELAY(10); 4843 CSR_READ_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT); 4844 4845 if (bootverbose) { 4846 if_printf(ifp, "tx_coal_bds_int -> %u\n", 4847 sc->bge_tx_coal_bds_int); 4848 } 4849 } 4850 4851 sc->bge_coal_chg = 0; 4852 } 4853 4854 static void 4855 bge_enable_intr(struct bge_softc *sc) 4856 { 4857 struct ifnet *ifp = &sc->arpcom.ac_if; 4858 4859 lwkt_serialize_handler_enable(ifp->if_serializer); 4860 4861 /* 4862 * Enable interrupt. 4863 */ 4864 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24); 4865 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 4866 /* XXX Linux driver */ 4867 bge_writembx(sc, BGE_MBX_IRQ0_LO, sc->bge_status_tag << 24); 4868 } 4869 4870 /* 4871 * Unmask the interrupt when we stop polling. 4872 */ 4873 PCI_CLRBIT(sc->bge_dev, BGE_PCI_MISC_CTL, 4874 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4875 4876 /* 4877 * Trigger another interrupt, since above writing 4878 * to interrupt mailbox0 may acknowledge pending 4879 * interrupt. 4880 */ 4881 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4882 } 4883 4884 static void 4885 bge_disable_intr(struct bge_softc *sc) 4886 { 4887 struct ifnet *ifp = &sc->arpcom.ac_if; 4888 4889 /* 4890 * Mask the interrupt when we start polling. 4891 */ 4892 PCI_SETBIT(sc->bge_dev, BGE_PCI_MISC_CTL, 4893 BGE_PCIMISCCTL_MASK_PCI_INTR, 4); 4894 4895 /* 4896 * Acknowledge possible asserted interrupt. 4897 */ 4898 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 4899 4900 sc->bge_npoll.ifpc_stcount = 0; 4901 4902 lwkt_serialize_handler_disable(ifp->if_serializer); 4903 } 4904 4905 static int 4906 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 4907 { 4908 uint32_t mac_addr; 4909 int ret = 1; 4910 4911 mac_addr = bge_readmem_ind(sc, 0x0c14); 4912 if ((mac_addr >> 16) == 0x484b) { 4913 ether_addr[0] = (uint8_t)(mac_addr >> 8); 4914 ether_addr[1] = (uint8_t)mac_addr; 4915 mac_addr = bge_readmem_ind(sc, 0x0c18); 4916 ether_addr[2] = (uint8_t)(mac_addr >> 24); 4917 ether_addr[3] = (uint8_t)(mac_addr >> 16); 4918 ether_addr[4] = (uint8_t)(mac_addr >> 8); 4919 ether_addr[5] = (uint8_t)mac_addr; 4920 ret = 0; 4921 } 4922 return ret; 4923 } 4924 4925 static int 4926 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 4927 { 4928 int mac_offset = BGE_EE_MAC_OFFSET; 4929 4930 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 4931 mac_offset = BGE_EE_MAC_OFFSET_5906; 4932 4933 return bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN); 4934 } 4935 4936 static int 4937 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 4938 { 4939 if (sc->bge_flags & BGE_FLAG_NO_EEPROM) 4940 return 1; 4941 4942 return bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 4943 ETHER_ADDR_LEN); 4944 } 4945 4946 static int 4947 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 4948 { 4949 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 4950 /* NOTE: Order is critical */ 4951 bge_get_eaddr_mem, 4952 bge_get_eaddr_nvram, 4953 bge_get_eaddr_eeprom, 4954 NULL 4955 }; 4956 const bge_eaddr_fcn_t *func; 4957 4958 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 4959 if ((*func)(sc, eaddr) == 0) 4960 break; 4961 } 4962 return (*func == NULL ? ENXIO : 0); 4963 } 4964 4965 /* 4966 * NOTE: 'm' is not freed upon failure 4967 */ 4968 struct mbuf * 4969 bge_defrag_shortdma(struct mbuf *m) 4970 { 4971 struct mbuf *n; 4972 int found; 4973 4974 /* 4975 * If device receive two back-to-back send BDs with less than 4976 * or equal to 8 total bytes then the device may hang. The two 4977 * back-to-back send BDs must in the same frame for this failure 4978 * to occur. Scan mbuf chains and see whether two back-to-back 4979 * send BDs are there. If this is the case, allocate new mbuf 4980 * and copy the frame to workaround the silicon bug. 4981 */ 4982 for (n = m, found = 0; n != NULL; n = n->m_next) { 4983 if (n->m_len < 8) { 4984 found++; 4985 if (found > 1) 4986 break; 4987 continue; 4988 } 4989 found = 0; 4990 } 4991 4992 if (found > 1) 4993 n = m_defrag(m, MB_DONTWAIT); 4994 else 4995 n = m; 4996 return n; 4997 } 4998 4999 static void 5000 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit) 5001 { 5002 int i; 5003 5004 BGE_CLRBIT(sc, reg, bit); 5005 for (i = 0; i < BGE_TIMEOUT; i++) { 5006 if ((CSR_READ_4(sc, reg) & bit) == 0) 5007 return; 5008 DELAY(100); 5009 } 5010 } 5011 5012 static void 5013 bge_link_poll(struct bge_softc *sc) 5014 { 5015 uint32_t status; 5016 5017 status = CSR_READ_4(sc, BGE_MAC_STS); 5018 if ((status & sc->bge_link_chg) || sc->bge_link_evt) { 5019 sc->bge_link_evt = 0; 5020 sc->bge_link_upd(sc, status); 5021 } 5022 } 5023 5024 static void 5025 bge_enable_msi(struct bge_softc *sc) 5026 { 5027 uint32_t msi_mode; 5028 5029 msi_mode = CSR_READ_4(sc, BGE_MSI_MODE); 5030 msi_mode |= BGE_MSIMODE_ENABLE; 5031 if (sc->bge_flags & BGE_FLAG_ONESHOT_MSI) { 5032 /* 5033 * According to all of the datasheets that are publicly 5034 * available, bit 5 of the MSI_MODE is defined to be 5035 * "MSI FIFO Underrun Attn" for BCM5755+ and BCM5906, on 5036 * which "oneshot MSI" is enabled. However, it is always 5037 * safe to clear it here. 5038 */ 5039 msi_mode &= ~BGE_MSIMODE_ONESHOT_DISABLE; 5040 } 5041 CSR_WRITE_4(sc, BGE_MSI_MODE, msi_mode); 5042 } 5043 5044 static int 5045 bge_setup_tso(struct bge_softc *sc, struct mbuf **mp, 5046 uint16_t *mss0, uint16_t *flags0) 5047 { 5048 struct mbuf *m; 5049 struct ip *ip; 5050 struct tcphdr *th; 5051 int thoff, iphlen, hoff, hlen; 5052 uint16_t flags, mss; 5053 5054 m = *mp; 5055 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 5056 5057 hoff = m->m_pkthdr.csum_lhlen; 5058 iphlen = m->m_pkthdr.csum_iphlen; 5059 thoff = m->m_pkthdr.csum_thlen; 5060 5061 KASSERT(hoff > 0, ("invalid ether header len")); 5062 KASSERT(iphlen > 0, ("invalid ip header len")); 5063 KASSERT(thoff > 0, ("invalid tcp header len")); 5064 5065 if (__predict_false(m->m_len < hoff + iphlen + thoff)) { 5066 m = m_pullup(m, hoff + iphlen + thoff); 5067 if (m == NULL) { 5068 *mp = NULL; 5069 return ENOBUFS; 5070 } 5071 *mp = m; 5072 } 5073 ip = mtodoff(m, struct ip *, hoff); 5074 th = mtodoff(m, struct tcphdr *, hoff + iphlen); 5075 5076 mss = m->m_pkthdr.tso_segsz; 5077 flags = BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; 5078 5079 ip->ip_len = htons(mss + iphlen + thoff); 5080 th->th_sum = 0; 5081 5082 hlen = (iphlen + thoff) >> 2; 5083 mss |= (hlen << 11); 5084 5085 *mss0 = mss; 5086 *flags0 = flags; 5087 5088 return 0; 5089 } 5090 5091 static void 5092 bge_stop_fw(struct bge_softc *sc) 5093 { 5094 int i; 5095 5096 if (sc->bge_asf_mode) { 5097 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 5098 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, 5099 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 5100 5101 for (i = 0; i < 100; i++ ) { 5102 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 5103 BGE_RX_CPU_DRV_EVENT)) 5104 break; 5105 DELAY(10); 5106 } 5107 } 5108 } 5109 5110 static void 5111 bge_sig_pre_reset(struct bge_softc *sc, int type) 5112 { 5113 /* 5114 * Some chips don't like this so only do this if ASF is enabled 5115 */ 5116 if (sc->bge_asf_mode) 5117 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 5118 5119 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 5120 switch (type) { 5121 case BGE_RESET_START: 5122 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5123 BGE_FW_DRV_STATE_START); 5124 break; 5125 case BGE_RESET_SHUTDOWN: 5126 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5127 BGE_FW_DRV_STATE_UNLOAD); 5128 break; 5129 case BGE_RESET_SUSPEND: 5130 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5131 BGE_FW_DRV_STATE_SUSPEND); 5132 break; 5133 } 5134 } 5135 5136 #ifdef notyet 5137 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 5138 bge_ape_driver_state_change(sc, type); 5139 #endif 5140 } 5141 5142 static void 5143 bge_sig_legacy(struct bge_softc *sc, int type) 5144 { 5145 if (sc->bge_asf_mode) { 5146 switch (type) { 5147 case BGE_RESET_START: 5148 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5149 BGE_FW_DRV_STATE_START); 5150 break; 5151 case BGE_RESET_SHUTDOWN: 5152 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5153 BGE_FW_DRV_STATE_UNLOAD); 5154 break; 5155 } 5156 } 5157 } 5158 5159 static void 5160 bge_sig_post_reset(struct bge_softc *sc, int type) 5161 { 5162 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 5163 switch (type) { 5164 case BGE_RESET_START: 5165 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5166 BGE_FW_DRV_STATE_START_DONE); 5167 /* START DONE */ 5168 break; 5169 case BGE_RESET_SHUTDOWN: 5170 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 5171 BGE_FW_DRV_STATE_UNLOAD_DONE); 5172 break; 5173 } 5174 } 5175 #ifdef notyet 5176 if (type == BGE_RESET_SHUTDOWN) 5177 bge_ape_driver_state_change(sc, type); 5178 #endif 5179 } 5180 5181 static void 5182 bge_asf_driver_up(struct bge_softc *sc) 5183 { 5184 if (sc->bge_asf_mode & ASF_STACKUP) { 5185 /* Send ASF heartbeat aprox. every 2s */ 5186 if (sc->bge_asf_count) 5187 sc->bge_asf_count --; 5188 else { 5189 sc->bge_asf_count = 2; 5190 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, 5191 BGE_FW_CMD_DRV_ALIVE); 5192 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); 5193 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 5194 BGE_FW_HB_TIMEOUT_SEC); 5195 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, 5196 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | 5197 BGE_RX_CPU_DRV_EVENT); 5198 } 5199 } 5200 } 5201