1 /* $NetBSD: if_bge.c,v 1.346 2020/07/02 09:07:10 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.346 2020/07/02 09:07:10 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
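 *
 * Each pair is an (rx_ticks, rx_max_bds) coalescing setting: the chip
 * raises a receive interrupt when either threshold is reached first,
 * so rx_max_bds bounds the number of packets handled per interrupt and
 * rx_ticks bounds the added latency.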
159 */ 160 static const struct bge_load_rx_thresh { 161 int rx_ticks; 162 int rx_max_bds; } 163 bge_rx_threshes[] = { 164 { 16, 1 }, /* rx_max_bds = 1 disables interrupt mitigation */ 165 { 32, 2 }, 166 { 50, 4 }, 167 { 100, 8 }, 168 { 192, 16 }, 169 { 416, 32 }, 170 { 598, 46 } 171 }; 172 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 173 174 /* XXX patchable; should be sysctl'able */ 175 static int bge_auto_thresh = 1; 176 static int bge_rx_thresh_lvl; 177 178 static int bge_rxthresh_nodenum; 179 180 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 181 182 static uint32_t bge_chipid(const struct pci_attach_args *); 183 static int bge_can_use_msi(struct bge_softc *); 184 static int bge_probe(device_t, cfdata_t, void *); 185 static void bge_attach(device_t, device_t, void *); 186 static int bge_detach(device_t, int); 187 static void bge_release_resources(struct bge_softc *); 188 189 static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]); 190 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 191 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 192 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 193 static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 194 195 static void bge_txeof(struct bge_softc *); 196 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); 197 static void bge_rxeof(struct bge_softc *); 198 199 static void bge_asf_driver_up (struct bge_softc *); 200 static void bge_tick(void *); 201 static void bge_stats_update(struct bge_softc *); 202 static void bge_stats_update_regs(struct bge_softc *); 203 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *); 204 205 static int bge_intr(void *); 206 static void bge_start(struct ifnet *); 207 static int bge_ifflags_cb(struct ethercom *); 208 static int bge_ioctl(struct ifnet *, u_long, void *); 209 static int bge_init(struct ifnet *); 210 static void bge_stop(struct ifnet *, int); 211 static void bge_watchdog(struct ifnet *); 212 static int bge_ifmedia_upd(struct ifnet *); 213 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 214 215 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 216 static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int); 217 218 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 219 static int bge_read_eeprom(struct bge_softc *, void *, int, int); 220 static void bge_setmulti(struct bge_softc *); 221 222 static void bge_handle_events(struct bge_softc *); 223 static int bge_alloc_jumbo_mem(struct bge_softc *); 224 #if 0 /* XXX */ 225 static void bge_free_jumbo_mem(struct bge_softc *); 226 #endif 227 static void *bge_jalloc(struct bge_softc *); 228 static void bge_jfree(struct mbuf *, void *, size_t, void *); 229 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, 230 bus_dmamap_t); 231 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 232 static int bge_init_rx_ring_std(struct bge_softc *); 233 static void bge_free_rx_ring_std(struct bge_softc *m, bool); 234 static int bge_init_rx_ring_jumbo(struct bge_softc *); 235 static void bge_free_rx_ring_jumbo(struct bge_softc *); 236 static void bge_free_tx_ring(struct bge_softc *m, bool); 237 static int bge_init_tx_ring(struct bge_softc *); 238 239 static int bge_chipinit(struct bge_softc *); 240 static int bge_blockinit(struct bge_softc *); 241 static int bge_phy_addr(struct bge_softc *); 242 static uint32_t bge_readmem_ind(struct bge_softc *, 
int); 243 static void bge_writemem_ind(struct bge_softc *, int, int); 244 static void bge_writembx(struct bge_softc *, int, int); 245 static void bge_writembx_flush(struct bge_softc *, int, int); 246 static void bge_writemem_direct(struct bge_softc *, int, int); 247 static void bge_writereg_ind(struct bge_softc *, int, int); 248 static void bge_set_max_readrq(struct bge_softc *); 249 250 static int bge_miibus_readreg(device_t, int, int, uint16_t *); 251 static int bge_miibus_writereg(device_t, int, int, uint16_t); 252 static void bge_miibus_statchg(struct ifnet *); 253 254 #define BGE_RESET_SHUTDOWN 0 255 #define BGE_RESET_START 1 256 #define BGE_RESET_SUSPEND 2 257 static void bge_sig_post_reset(struct bge_softc *, int); 258 static void bge_sig_legacy(struct bge_softc *, int); 259 static void bge_sig_pre_reset(struct bge_softc *, int); 260 static void bge_wait_for_event_ack(struct bge_softc *); 261 static void bge_stop_fw(struct bge_softc *); 262 static int bge_reset(struct bge_softc *); 263 static void bge_link_upd(struct bge_softc *); 264 static void bge_sysctl_init(struct bge_softc *); 265 static int bge_sysctl_verify(SYSCTLFN_PROTO); 266 267 static void bge_ape_lock_init(struct bge_softc *); 268 static void bge_ape_read_fw_ver(struct bge_softc *); 269 static int bge_ape_lock(struct bge_softc *, int); 270 static void bge_ape_unlock(struct bge_softc *, int); 271 static void bge_ape_send_event(struct bge_softc *, uint32_t); 272 static void bge_ape_driver_state_change(struct bge_softc *, int); 273 274 #ifdef BGE_DEBUG 275 #define DPRINTF(x) if (bgedebug) printf x 276 #define DPRINTFN(n, x) if (bgedebug >= (n)) printf x 277 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 278 int bgedebug = 0; 279 int bge_tso_debug = 0; 280 void bge_debug_info(struct bge_softc *); 281 #else 282 #define DPRINTF(x) 283 #define DPRINTFN(n, x) 284 #define BGE_TSO_PRINTF(x) 285 #endif 286 287 #ifdef BGE_EVENT_COUNTERS 288 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 289 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 290 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 291 #else 292 #define BGE_EVCNT_INCR(ev) /* nothing */ 293 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 294 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 295 #endif 296 297 #define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b 298 /* 299 * The BCM5700 documentation seems to indicate that the hardware still has the 300 * Alteon vendor ID burned into it, though it should always be overridden by 301 * the value in the EEPROM. We'll check for it anyway. 
302 */ 303 static const struct bge_product { 304 pci_vendor_id_t bp_vendor; 305 pci_product_id_t bp_product; 306 const char *bp_name; 307 } bge_products[] = { 308 { VIDDID(ALTEON, BCM5700), "Broadcom BCM5700 Gigabit" }, 309 { VIDDID(ALTEON, BCM5701), "Broadcom BCM5701 Gigabit" }, 310 { VIDDID(ALTIMA, AC1000), "Altima AC1000 Gigabit" }, 311 { VIDDID(ALTIMA, AC1001), "Altima AC1001 Gigabit" }, 312 { VIDDID(ALTIMA, AC1003), "Altima AC1003 Gigabit" }, 313 { VIDDID(ALTIMA, AC9100), "Altima AC9100 Gigabit" }, 314 { VIDDID(APPLE, BCM5701), "APPLE BCM5701 Gigabit" }, 315 { VIDDID(BROADCOM, BCM5700), "Broadcom BCM5700 Gigabit" }, 316 { VIDDID(BROADCOM, BCM5701), "Broadcom BCM5701 Gigabit" }, 317 { VIDDID(BROADCOM, BCM5702), "Broadcom BCM5702 Gigabit" }, 318 { VIDDID(BROADCOM, BCM5702FE), "Broadcom BCM5702FE Fast" }, 319 { VIDDID(BROADCOM, BCM5702X), "Broadcom BCM5702X Gigabit" }, 320 { VIDDID(BROADCOM, BCM5703), "Broadcom BCM5703 Gigabit" }, 321 { VIDDID(BROADCOM, BCM5703X), "Broadcom BCM5703X Gigabit" }, 322 { VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" }, 323 { VIDDID(BROADCOM, BCM5704C), "Broadcom BCM5704C Dual Gigabit" }, 324 { VIDDID(BROADCOM, BCM5704S), "Broadcom BCM5704S Dual Gigabit" }, 325 { VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" }, 326 { VIDDID(BROADCOM, BCM5705), "Broadcom BCM5705 Gigabit" }, 327 { VIDDID(BROADCOM, BCM5705F), "Broadcom BCM5705F Gigabit" }, 328 { VIDDID(BROADCOM, BCM5705K), "Broadcom BCM5705K Gigabit" }, 329 { VIDDID(BROADCOM, BCM5705M), "Broadcom BCM5705M Gigabit" }, 330 { VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" }, 331 { VIDDID(BROADCOM, BCM5714), "Broadcom BCM5714 Gigabit" }, 332 { VIDDID(BROADCOM, BCM5714S), "Broadcom BCM5714S Gigabit" }, 333 { VIDDID(BROADCOM, BCM5715), "Broadcom BCM5715 Gigabit" }, 334 { VIDDID(BROADCOM, BCM5715S), "Broadcom BCM5715S Gigabit" }, 335 { VIDDID(BROADCOM, BCM5717), "Broadcom BCM5717 Gigabit" }, 336 { VIDDID(BROADCOM, BCM5717C), "Broadcom BCM5717 Gigabit" }, 337 { VIDDID(BROADCOM, BCM5718), "Broadcom BCM5718 Gigabit" }, 338 { VIDDID(BROADCOM, BCM5719), "Broadcom BCM5719 Gigabit" }, 339 { VIDDID(BROADCOM, BCM5720), "Broadcom BCM5720 Gigabit" }, 340 { VIDDID(BROADCOM, BCM5721), "Broadcom BCM5721 Gigabit" }, 341 { VIDDID(BROADCOM, BCM5722), "Broadcom BCM5722 Gigabit" }, 342 { VIDDID(BROADCOM, BCM5723), "Broadcom BCM5723 Gigabit" }, 343 { VIDDID(BROADCOM, BCM5725), "Broadcom BCM5725 Gigabit" }, 344 { VIDDID(BROADCOM, BCM5727), "Broadcom BCM5727 Gigabit" }, 345 { VIDDID(BROADCOM, BCM5750), "Broadcom BCM5750 Gigabit" }, 346 { VIDDID(BROADCOM, BCM5751), "Broadcom BCM5751 Gigabit" }, 347 { VIDDID(BROADCOM, BCM5751F), "Broadcom BCM5751F Gigabit" }, 348 { VIDDID(BROADCOM, BCM5751M), "Broadcom BCM5751M Gigabit" }, 349 { VIDDID(BROADCOM, BCM5752), "Broadcom BCM5752 Gigabit" }, 350 { VIDDID(BROADCOM, BCM5752M), "Broadcom BCM5752M Gigabit" }, 351 { VIDDID(BROADCOM, BCM5753), "Broadcom BCM5753 Gigabit" }, 352 { VIDDID(BROADCOM, BCM5753F), "Broadcom BCM5753F Gigabit" }, 353 { VIDDID(BROADCOM, BCM5753M), "Broadcom BCM5753M Gigabit" }, 354 { VIDDID(BROADCOM, BCM5754), "Broadcom BCM5754 Gigabit" }, 355 { VIDDID(BROADCOM, BCM5754M), "Broadcom BCM5754M Gigabit" }, 356 { VIDDID(BROADCOM, BCM5755), "Broadcom BCM5755 Gigabit" }, 357 { VIDDID(BROADCOM, BCM5755M), "Broadcom BCM5755M Gigabit" }, 358 { VIDDID(BROADCOM, BCM5756), "Broadcom BCM5756 Gigabit" }, 359 { VIDDID(BROADCOM, BCM5761), "Broadcom BCM5761 Gigabit" }, 360 { VIDDID(BROADCOM, BCM5761E), "Broadcom BCM5761E Gigabit" }, 361 { VIDDID(BROADCOM, 
BCM5761S), "Broadcom BCM5761S Gigabit" }, 362 { VIDDID(BROADCOM, BCM5761SE), "Broadcom BCM5761SE Gigabit" }, 363 { VIDDID(BROADCOM, BCM5762), "Broadcom BCM5762 Gigabit" }, 364 { VIDDID(BROADCOM, BCM5764), "Broadcom BCM5764 Gigabit" }, 365 { VIDDID(BROADCOM, BCM5780), "Broadcom BCM5780 Gigabit" }, 366 { VIDDID(BROADCOM, BCM5780S), "Broadcom BCM5780S Gigabit" }, 367 { VIDDID(BROADCOM, BCM5781), "Broadcom BCM5781 Gigabit" }, 368 { VIDDID(BROADCOM, BCM5782), "Broadcom BCM5782 Gigabit" }, 369 { VIDDID(BROADCOM, BCM5784M), "BCM5784M NetLink 1000baseT" }, 370 { VIDDID(BROADCOM, BCM5785F), "BCM5785F NetLink 10/100" }, 371 { VIDDID(BROADCOM, BCM5785G), "BCM5785G NetLink 1000baseT" }, 372 { VIDDID(BROADCOM, BCM5786), "Broadcom BCM5786 Gigabit" }, 373 { VIDDID(BROADCOM, BCM5787), "Broadcom BCM5787 Gigabit" }, 374 { VIDDID(BROADCOM, BCM5787F), "Broadcom BCM5787F 10/100" }, 375 { VIDDID(BROADCOM, BCM5787M), "Broadcom BCM5787M Gigabit" }, 376 { VIDDID(BROADCOM, BCM5788), "Broadcom BCM5788 Gigabit" }, 377 { VIDDID(BROADCOM, BCM5789), "Broadcom BCM5789 Gigabit" }, 378 { VIDDID(BROADCOM, BCM5901), "Broadcom BCM5901 Fast" }, 379 { VIDDID(BROADCOM, BCM5901A2), "Broadcom BCM5901A2 Fast" }, 380 { VIDDID(BROADCOM, BCM5903M), "Broadcom BCM5903M Fast" }, 381 { VIDDID(BROADCOM, BCM5906), "Broadcom BCM5906 Fast" }, 382 { VIDDID(BROADCOM, BCM5906M), "Broadcom BCM5906M Fast" }, 383 { VIDDID(BROADCOM, BCM57760), "Broadcom BCM57760 Gigabit" }, 384 { VIDDID(BROADCOM, BCM57761), "Broadcom BCM57761 Gigabit" }, 385 { VIDDID(BROADCOM, BCM57762), "Broadcom BCM57762 Gigabit" }, 386 { VIDDID(BROADCOM, BCM57764), "Broadcom BCM57764 Gigabit" }, 387 { VIDDID(BROADCOM, BCM57765), "Broadcom BCM57765 Gigabit" }, 388 { VIDDID(BROADCOM, BCM57766), "Broadcom BCM57766 Gigabit" }, 389 { VIDDID(BROADCOM, BCM57767), "Broadcom BCM57767 Gigabit" }, 390 { VIDDID(BROADCOM, BCM57780), "Broadcom BCM57780 Gigabit" }, 391 { VIDDID(BROADCOM, BCM57781), "Broadcom BCM57781 Gigabit" }, 392 { VIDDID(BROADCOM, BCM57782), "Broadcom BCM57782 Gigabit" }, 393 { VIDDID(BROADCOM, BCM57785), "Broadcom BCM57785 Gigabit" }, 394 { VIDDID(BROADCOM, BCM57786), "Broadcom BCM57786 Gigabit" }, 395 { VIDDID(BROADCOM, BCM57787), "Broadcom BCM57787 Gigabit" }, 396 { VIDDID(BROADCOM, BCM57788), "Broadcom BCM57788 Gigabit" }, 397 { VIDDID(BROADCOM, BCM57790), "Broadcom BCM57790 Gigabit" }, 398 { VIDDID(BROADCOM, BCM57791), "Broadcom BCM57791 Gigabit" }, 399 { VIDDID(BROADCOM, BCM57795), "Broadcom BCM57795 Gigabit" }, 400 { VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" }, 401 { VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" }, 402 { VIDDID(3COM, 3C996), "3Com 3c996 Gigabit" }, 403 { VIDDID(FUJITSU4, PW008GE4), "Fujitsu PW008GE4 Gigabit" }, 404 { VIDDID(FUJITSU4, PW008GE5), "Fujitsu PW008GE5 Gigabit" }, 405 { VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" }, 406 { 0, 0, NULL }, 407 }; 408 409 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGEF_JUMBO_CAPABLE) 410 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGEF_5700_FAMILY) 411 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGEF_5705_PLUS) 412 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGEF_5714_FAMILY) 413 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGEF_575X_PLUS) 414 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGEF_5755_PLUS) 415 #define BGE_IS_57765_FAMILY(sc) ((sc)->bge_flags & BGEF_57765_FAMILY) 416 #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGEF_57765_PLUS) 417 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGEF_5717_PLUS) 418 419 static 
const struct bge_revision { 420 uint32_t br_chipid; 421 const char *br_name; 422 } bge_revisions[] = { 423 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 424 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 425 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 426 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 427 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 428 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 429 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 430 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 431 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 432 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 433 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 434 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 435 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, 436 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, 437 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, 438 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, 439 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, 440 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 441 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 442 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 443 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 444 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 445 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 446 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 447 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 448 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 449 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 450 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 451 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 452 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 453 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 454 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 455 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 456 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 457 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 458 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 459 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 460 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 461 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 462 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 463 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 464 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 465 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 466 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, 467 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, 468 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, 469 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, 470 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 471 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 472 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 473 { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, 474 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 475 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 476 { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, 477 { BGE_CHIPID_BCM5762_B0, "BCM5762 B0" }, 478 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 479 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 480 { BGE_CHIPID_BCM5784_B0, "BCM5784 B0" }, 481 /* 5754 and 5787 share the same ASIC ID */ 482 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 483 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 484 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 485 { BGE_CHIPID_BCM5906_A0, "BCM5906 A0" }, 486 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 487 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 488 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, 489 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, 490 { BGE_CHIPID_BCM57766_A0, "BCM57766 A0" }, 491 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 492 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 493 494 { 0, NULL } 495 }; 496 497 /* 498 * Some defaults for major revisions, so that newer steppings 499 * that we don't know about have a shot at working. 
500 */ 501 static const struct bge_revision bge_majorrevs[] = { 502 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 503 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 504 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 505 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 506 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 507 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 508 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 509 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 510 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 511 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 512 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 513 { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 514 { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 515 { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 516 /* 5754 and 5787 share the same ASIC ID */ 517 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 518 { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 519 { BGE_ASICREV_BCM57765, "unknown BCM57765" }, 520 { BGE_ASICREV_BCM57766, "unknown BCM57766" }, 521 { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 522 { BGE_ASICREV_BCM5717, "unknown BCM5717" }, 523 { BGE_ASICREV_BCM5719, "unknown BCM5719" }, 524 { BGE_ASICREV_BCM5720, "unknown BCM5720" }, 525 { BGE_ASICREV_BCM5762, "unknown BCM5762" }, 526 527 { 0, NULL } 528 }; 529 530 static int bge_allow_asf = 1; 531 532 CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc), 533 bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 534 535 static uint32_t 536 bge_readmem_ind(struct bge_softc *sc, int off) 537 { 538 pcireg_t val; 539 540 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 541 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 542 return 0; 543 544 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 545 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA); 546 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 547 return val; 548 } 549 550 static void 551 bge_writemem_ind(struct bge_softc *sc, int off, int val) 552 { 553 554 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 555 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val); 556 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 557 } 558 559 /* 560 * PCI Express only 561 */ 562 static void 563 bge_set_max_readrq(struct bge_softc *sc) 564 { 565 pcireg_t val; 566 567 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap 568 + PCIE_DCSR); 569 val &= ~PCIE_DCSR_MAX_READ_REQ; 570 switch (sc->bge_expmrq) { 571 case 2048: 572 val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048; 573 break; 574 case 4096: 575 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; 576 break; 577 default: 578 panic("incorrect expmrq value(%d)", sc->bge_expmrq); 579 break; 580 } 581 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap 582 + PCIE_DCSR, val); 583 } 584 585 #ifdef notdef 586 static uint32_t 587 bge_readreg_ind(struct bge_softc *sc, int off) 588 { 589 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 590 return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA)); 591 } 592 #endif 593 594 static void 595 bge_writereg_ind(struct bge_softc *sc, int off, int val) 596 { 597 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 598 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val); 599 } 600 601 static void 602 bge_writemem_direct(struct bge_softc *sc, int off, int val) 603 { 604 CSR_WRITE_4(sc, off, val); 605 } 606 607 static void 608 bge_writembx(struct bge_softc *sc, int off, int val) 609 { 610 if 
(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx_flush(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4_FLUSH(sc, off, val);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on this function\n",
		    device_xname(sc->bge_dev));
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version.
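	 * BGE_APE_FW_VERSION packs the major, minor, revision and build
	 * numbers into a single 32-bit word; the mask/shift pairs below
	 * unpack them.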
*/ 718 aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype, 719 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, 720 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, 721 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, 722 (apedata & BGE_APE_FW_VERSION_BLDMSK)); 723 } 724 725 int 726 bge_ape_lock(struct bge_softc *sc, int locknum) 727 { 728 struct pci_attach_args *pa = &(sc->bge_pa); 729 uint32_t bit, gnt, req, status; 730 int i, off; 731 732 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 733 return (0); 734 735 /* Lock request/grant registers have different bases. */ 736 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) { 737 req = BGE_APE_LOCK_REQ; 738 gnt = BGE_APE_LOCK_GRANT; 739 } else { 740 req = BGE_APE_PER_LOCK_REQ; 741 gnt = BGE_APE_PER_LOCK_GRANT; 742 } 743 744 off = 4 * locknum; 745 746 switch (locknum) { 747 case BGE_APE_LOCK_GPIO: 748 /* Lock required when using GPIO. */ 749 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 750 return (0); 751 if (pa->pa_function == 0) 752 bit = BGE_APE_LOCK_REQ_DRIVER0; 753 else 754 bit = (1 << pa->pa_function); 755 break; 756 case BGE_APE_LOCK_GRC: 757 /* Lock required to reset the device. */ 758 if (pa->pa_function == 0) 759 bit = BGE_APE_LOCK_REQ_DRIVER0; 760 else 761 bit = (1 << pa->pa_function); 762 break; 763 case BGE_APE_LOCK_MEM: 764 /* Lock required when accessing certain APE memory. */ 765 if (pa->pa_function == 0) 766 bit = BGE_APE_LOCK_REQ_DRIVER0; 767 else 768 bit = (1 << pa->pa_function); 769 break; 770 case BGE_APE_LOCK_PHY0: 771 case BGE_APE_LOCK_PHY1: 772 case BGE_APE_LOCK_PHY2: 773 case BGE_APE_LOCK_PHY3: 774 /* Lock required when accessing PHYs. */ 775 bit = BGE_APE_LOCK_REQ_DRIVER0; 776 break; 777 default: 778 return (EINVAL); 779 } 780 781 /* Request a lock. */ 782 APE_WRITE_4_FLUSH(sc, req + off, bit); 783 784 /* Wait up to 1 second to acquire lock. */ 785 for (i = 0; i < 20000; i++) { 786 status = APE_READ_4(sc, gnt + off); 787 if (status == bit) 788 break; 789 DELAY(50); 790 } 791 792 /* Handle any errors. */ 793 if (status != bit) { 794 printf("%s: APE lock %d request failed! " 795 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", 796 device_xname(sc->bge_dev), 797 locknum, req + off, bit & 0xFFFF, gnt + off, 798 status & 0xFFFF); 799 /* Revoke the lock request. 
*/ 800 APE_WRITE_4(sc, gnt + off, bit); 801 return (EBUSY); 802 } 803 804 return (0); 805 } 806 807 void 808 bge_ape_unlock(struct bge_softc *sc, int locknum) 809 { 810 struct pci_attach_args *pa = &(sc->bge_pa); 811 uint32_t bit, gnt; 812 int off; 813 814 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 815 return; 816 817 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 818 gnt = BGE_APE_LOCK_GRANT; 819 else 820 gnt = BGE_APE_PER_LOCK_GRANT; 821 822 off = 4 * locknum; 823 824 switch (locknum) { 825 case BGE_APE_LOCK_GPIO: 826 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 827 return; 828 if (pa->pa_function == 0) 829 bit = BGE_APE_LOCK_GRANT_DRIVER0; 830 else 831 bit = (1 << pa->pa_function); 832 break; 833 case BGE_APE_LOCK_GRC: 834 if (pa->pa_function == 0) 835 bit = BGE_APE_LOCK_GRANT_DRIVER0; 836 else 837 bit = (1 << pa->pa_function); 838 break; 839 case BGE_APE_LOCK_MEM: 840 if (pa->pa_function == 0) 841 bit = BGE_APE_LOCK_GRANT_DRIVER0; 842 else 843 bit = (1 << pa->pa_function); 844 break; 845 case BGE_APE_LOCK_PHY0: 846 case BGE_APE_LOCK_PHY1: 847 case BGE_APE_LOCK_PHY2: 848 case BGE_APE_LOCK_PHY3: 849 bit = BGE_APE_LOCK_GRANT_DRIVER0; 850 break; 851 default: 852 return; 853 } 854 855 /* Write and flush for consecutive bge_ape_lock() */ 856 APE_WRITE_4_FLUSH(sc, gnt + off, bit); 857 } 858 859 /* 860 * Send an event to the APE firmware. 861 */ 862 void 863 bge_ape_send_event(struct bge_softc *sc, uint32_t event) 864 { 865 uint32_t apedata; 866 int i; 867 868 /* NCSI does not support APE events. */ 869 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 870 return; 871 872 /* Wait up to 1ms for APE to service previous event. */ 873 for (i = 10; i > 0; i--) { 874 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 875 break; 876 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 877 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 878 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 879 BGE_APE_EVENT_STATUS_EVENT_PENDING); 880 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 881 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 882 break; 883 } 884 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 885 DELAY(100); 886 } 887 if (i == 0) { 888 printf("%s: APE event 0x%08x send timed out\n", 889 device_xname(sc->bge_dev), event); 890 } 891 } 892 893 void 894 bge_ape_driver_state_change(struct bge_softc *sc, int kind) 895 { 896 uint32_t apedata, event; 897 898 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 899 return; 900 901 switch (kind) { 902 case BGE_RESET_START: 903 /* If this is the first load, clear the load counter. */ 904 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 905 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) 906 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 907 else { 908 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 909 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 910 } 911 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 912 BGE_APE_HOST_SEG_SIG_MAGIC); 913 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 914 BGE_APE_HOST_SEG_LEN_MAGIC); 915 916 /* Add some version info if bge(4) supports it. 
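		 * The host segment is stamped with driver ID 1.0, the
		 * no-PHY-lock behaviour flag and a disabled heartbeat
		 * interval before the START state is recorded.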
*/ 917 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, 918 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); 919 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, 920 BGE_APE_HOST_BEHAV_NO_PHYLOCK); 921 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, 922 BGE_APE_HOST_HEARTBEAT_INT_DISABLE); 923 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 924 BGE_APE_HOST_DRVR_STATE_START); 925 event = BGE_APE_EVENT_STATUS_STATE_START; 926 break; 927 case BGE_RESET_SHUTDOWN: 928 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 929 BGE_APE_HOST_DRVR_STATE_UNLOAD); 930 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; 931 break; 932 case BGE_RESET_SUSPEND: 933 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; 934 break; 935 default: 936 return; 937 } 938 939 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | 940 BGE_APE_EVENT_STATUS_STATE_CHNGE); 941 } 942 943 static uint8_t 944 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 945 { 946 uint32_t access, byte = 0; 947 int i; 948 949 /* Lock. */ 950 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 951 for (i = 0; i < 8000; i++) { 952 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 953 break; 954 DELAY(20); 955 } 956 if (i == 8000) 957 return 1; 958 959 /* Enable access. */ 960 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 961 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 962 963 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 964 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 965 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 966 DELAY(10); 967 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 968 DELAY(10); 969 break; 970 } 971 } 972 973 if (i == BGE_TIMEOUT * 10) { 974 aprint_error_dev(sc->bge_dev, "nvram read timed out\n"); 975 return 1; 976 } 977 978 /* Get result. */ 979 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 980 981 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 982 983 /* Disable access. */ 984 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 985 986 /* Unlock. */ 987 CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 988 989 return 0; 990 } 991 992 /* 993 * Read a sequence of bytes from NVRAM. 994 */ 995 static int 996 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt) 997 { 998 int error = 0, i; 999 uint8_t byte = 0; 1000 1001 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 1002 return 1; 1003 1004 for (i = 0; i < cnt; i++) { 1005 error = bge_nvram_getbyte(sc, off + i, &byte); 1006 if (error) 1007 break; 1008 *(dest + i) = byte; 1009 } 1010 1011 return (error ? 1 : 0); 1012 } 1013 1014 /* 1015 * Read a byte of data stored in the EEPROM at address 'addr.' The 1016 * BCM570x supports both the traditional bitbang interface and an 1017 * auto access interface for reading the EEPROM. We use the auto 1018 * access method. 1019 */ 1020 static uint8_t 1021 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 1022 { 1023 int i; 1024 uint32_t byte = 0; 1025 1026 /* 1027 * Enable use of auto EEPROM access so we can avoid 1028 * having to use the bitbang method. 1029 */ 1030 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 1031 1032 /* Reset the EEPROM, load the clock period. */ 1033 CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR, 1034 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 1035 DELAY(20); 1036 1037 /* Issue the read EEPROM command. 
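	 * Writing BGE_EE_READCMD together with the byte address starts the
	 * auto-access state machine; completion is signalled by
	 * BGE_EEADDR_DONE, which is polled for below.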
*/ 1038 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 1039 1040 /* Wait for completion */ 1041 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 1042 DELAY(10); 1043 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 1044 break; 1045 } 1046 1047 if (i == BGE_TIMEOUT * 10) { 1048 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n"); 1049 return 1; 1050 } 1051 1052 /* Get result. */ 1053 byte = CSR_READ_4(sc, BGE_EE_DATA); 1054 1055 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 1056 1057 return 0; 1058 } 1059 1060 /* 1061 * Read a sequence of bytes from the EEPROM. 1062 */ 1063 static int 1064 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) 1065 { 1066 int error = 0, i; 1067 uint8_t byte = 0; 1068 char *dest = destv; 1069 1070 for (i = 0; i < cnt; i++) { 1071 error = bge_eeprom_getbyte(sc, off + i, &byte); 1072 if (error) 1073 break; 1074 *(dest + i) = byte; 1075 } 1076 1077 return (error ? 1 : 0); 1078 } 1079 1080 static int 1081 bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1082 { 1083 struct bge_softc *sc = device_private(dev); 1084 uint32_t data; 1085 uint32_t autopoll; 1086 int rv = 0; 1087 int i; 1088 1089 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1090 return -1; 1091 1092 /* Reading with autopolling on may trigger PCI errors */ 1093 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1094 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1095 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1096 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1097 DELAY(80); 1098 } 1099 1100 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 1101 BGE_MIPHY(phy) | BGE_MIREG(reg)); 1102 1103 for (i = 0; i < BGE_TIMEOUT; i++) { 1104 delay(10); 1105 data = CSR_READ_4(sc, BGE_MI_COMM); 1106 if (!(data & BGE_MICOMM_BUSY)) { 1107 DELAY(5); 1108 data = CSR_READ_4(sc, BGE_MI_COMM); 1109 break; 1110 } 1111 } 1112 1113 if (i == BGE_TIMEOUT) { 1114 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 1115 rv = ETIMEDOUT; 1116 } else if ((data & BGE_MICOMM_READFAIL) != 0) { 1117 /* XXX This error occurs on some devices while attaching. 
*/ 1118 aprint_debug_dev(sc->bge_dev, "PHY read I/O error\n"); 1119 rv = EIO; 1120 } else 1121 *val = data & BGE_MICOMM_DATA; 1122 1123 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1124 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1125 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1126 DELAY(80); 1127 } 1128 1129 bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1130 1131 return rv; 1132 } 1133 1134 static int 1135 bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1136 { 1137 struct bge_softc *sc = device_private(dev); 1138 uint32_t data, autopoll; 1139 int rv = 0; 1140 int i; 1141 1142 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 1143 (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL)) 1144 return 0; 1145 1146 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) 1147 return -1; 1148 1149 /* Reading with autopolling on may trigger PCI errors */ 1150 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1151 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1152 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1153 BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1154 DELAY(80); 1155 } 1156 1157 CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 1158 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 1159 1160 for (i = 0; i < BGE_TIMEOUT; i++) { 1161 delay(10); 1162 data = CSR_READ_4(sc, BGE_MI_COMM); 1163 if (!(data & BGE_MICOMM_BUSY)) { 1164 delay(5); 1165 data = CSR_READ_4(sc, BGE_MI_COMM); 1166 break; 1167 } 1168 } 1169 1170 if (i == BGE_TIMEOUT) { 1171 aprint_error_dev(sc->bge_dev, "PHY write timed out\n"); 1172 rv = ETIMEDOUT; 1173 } else if ((data & BGE_MICOMM_READFAIL) != 0) { 1174 aprint_error_dev(sc->bge_dev, "PHY write I/O error\n"); 1175 rv = EIO; 1176 } 1177 1178 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1179 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1180 BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1181 delay(80); 1182 } 1183 1184 bge_ape_unlock(sc, sc->bge_phy_ape_lock); 1185 1186 if (i == BGE_TIMEOUT) { 1187 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 1188 return ETIMEDOUT; 1189 } 1190 1191 return rv; 1192 } 1193 1194 static void 1195 bge_miibus_statchg(struct ifnet *ifp) 1196 { 1197 struct bge_softc *sc = ifp->if_softc; 1198 struct mii_data *mii = &sc->bge_mii; 1199 uint32_t mac_mode, rx_mode, tx_mode; 1200 1201 /* 1202 * Get flow control negotiation result. 1203 */ 1204 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1205 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) 1206 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1207 1208 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 1209 mii->mii_media_status & IFM_ACTIVE && 1210 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1211 BGE_STS_SETBIT(sc, BGE_STS_LINK); 1212 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 1213 (!(mii->mii_media_status & IFM_ACTIVE) || 1214 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 1215 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 1216 1217 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 1218 return; 1219 1220 /* Set the port mode (MII/GMII) to match the link speed. 
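	 * 1000baseT and 1000baseSX run the MAC in GMII mode; all slower
	 * media use MII.  The duplex and flow-control bits are rebuilt
	 * from the negotiated media below.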
	 */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((mii->mii_media_active & IFM_FDX) != 0) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);
}


/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;
	int s;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	s = pserialize_read_enter();
	IFNET_READER_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
	pserialize_read_exit(s);
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage.
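	 * One contiguous BGE_JMEM-sized DMA area is allocated, mapped and
	 * loaded here, then carved into BGE_JSLOTS buffers of BGE_JLEN
	 * bytes each further down.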
*/ 1327 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 1328 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1329 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 1330 return ENOBUFS; 1331 } 1332 1333 state = 1; 1334 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, 1335 BUS_DMA_NOWAIT)) { 1336 aprint_error_dev(sc->bge_dev, 1337 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 1338 error = ENOBUFS; 1339 goto out; 1340 } 1341 1342 state = 2; 1343 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 1344 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 1345 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 1346 error = ENOBUFS; 1347 goto out; 1348 } 1349 1350 state = 3; 1351 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1352 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 1353 aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 1354 error = ENOBUFS; 1355 goto out; 1356 } 1357 1358 state = 4; 1359 sc->bge_cdata.bge_jumbo_buf = (void *)kva; 1360 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 1361 1362 SLIST_INIT(&sc->bge_jfree_listhead); 1363 SLIST_INIT(&sc->bge_jinuse_listhead); 1364 1365 /* 1366 * Now divide it up into 9K pieces and save the addresses 1367 * in an array. 1368 */ 1369 ptr = sc->bge_cdata.bge_jumbo_buf; 1370 for (i = 0; i < BGE_JSLOTS; i++) { 1371 sc->bge_cdata.bge_jslots[i] = ptr; 1372 ptr += BGE_JLEN; 1373 entry = malloc(sizeof(struct bge_jpool_entry), 1374 M_DEVBUF, M_WAITOK); 1375 entry->slot = i; 1376 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 1377 entry, jpool_entries); 1378 } 1379 out: 1380 if (error != 0) { 1381 switch (state) { 1382 case 4: 1383 bus_dmamap_unload(sc->bge_dmatag, 1384 sc->bge_cdata.bge_rx_jumbo_map); 1385 /* FALLTHROUGH */ 1386 case 3: 1387 bus_dmamap_destroy(sc->bge_dmatag, 1388 sc->bge_cdata.bge_rx_jumbo_map); 1389 /* FALLTHROUGH */ 1390 case 2: 1391 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 1392 /* FALLTHROUGH */ 1393 case 1: 1394 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 1395 break; 1396 default: 1397 break; 1398 } 1399 } 1400 1401 return error; 1402 } 1403 1404 /* 1405 * Allocate a jumbo buffer. 1406 */ 1407 static void * 1408 bge_jalloc(struct bge_softc *sc) 1409 { 1410 struct bge_jpool_entry *entry; 1411 1412 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 1413 1414 if (entry == NULL) { 1415 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 1416 return NULL; 1417 } 1418 1419 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 1420 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 1421 return (sc->bge_cdata.bge_jslots[entry->slot]); 1422 } 1423 1424 /* 1425 * Release a jumbo buffer. 1426 */ 1427 static void 1428 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 1429 { 1430 struct bge_jpool_entry *entry; 1431 struct bge_softc *sc; 1432 int i, s; 1433 1434 /* Extract the softc struct pointer. 
*/ 1435 sc = (struct bge_softc *)arg; 1436 1437 if (sc == NULL) 1438 panic("bge_jfree: can't find softc pointer!"); 1439 1440 /* calculate the slot this buffer belongs to */ 1441 1442 i = ((char *)buf 1443 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 1444 1445 if ((i < 0) || (i >= BGE_JSLOTS)) 1446 panic("bge_jfree: asked to free buffer that we don't manage!"); 1447 1448 s = splvm(); 1449 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 1450 if (entry == NULL) 1451 panic("bge_jfree: buffer not in use!"); 1452 entry->slot = i; 1453 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 1454 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 1455 1456 if (__predict_true(m != NULL)) 1457 pool_cache_put(mb_cache, m); 1458 splx(s); 1459 } 1460 1461 1462 /* 1463 * Initialize a standard receive ring descriptor. 1464 */ 1465 static int 1466 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, 1467 bus_dmamap_t dmamap) 1468 { 1469 struct mbuf *m_new = NULL; 1470 struct bge_rx_bd *r; 1471 int error; 1472 1473 if (dmamap == NULL) 1474 dmamap = sc->bge_cdata.bge_rx_std_map[i]; 1475 1476 if (dmamap == NULL) { 1477 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 1478 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 1479 if (error != 0) 1480 return error; 1481 } 1482 1483 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 1484 1485 if (m == NULL) { 1486 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1487 if (m_new == NULL) 1488 return ENOBUFS; 1489 1490 MCLGET(m_new, M_DONTWAIT); 1491 if (!(m_new->m_flags & M_EXT)) { 1492 m_freem(m_new); 1493 return ENOBUFS; 1494 } 1495 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1496 1497 } else { 1498 m_new = m; 1499 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1500 m_new->m_data = m_new->m_ext.ext_buf; 1501 } 1502 if (!(sc->bge_flags & BGEF_RX_ALIGNBUG)) 1503 m_adj(m_new, ETHER_ALIGN); 1504 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 1505 BUS_DMA_READ | BUS_DMA_NOWAIT)) { 1506 m_freem(m_new); 1507 return ENOBUFS; 1508 } 1509 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 1510 BUS_DMASYNC_PREREAD); 1511 1512 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 1513 r = &sc->bge_rdata->bge_rx_std_ring[i]; 1514 BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr); 1515 r->bge_flags = BGE_RXBDFLAG_END; 1516 r->bge_len = m_new->m_len; 1517 r->bge_idx = i; 1518 1519 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1520 offsetof(struct bge_ring_data, bge_rx_std_ring) + 1521 i * sizeof (struct bge_rx_bd), 1522 sizeof (struct bge_rx_bd), 1523 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1524 1525 return 0; 1526 } 1527 1528 /* 1529 * Initialize a jumbo receive ring descriptor. This allocates 1530 * a jumbo buffer from the pool managed internally by the driver. 1531 */ 1532 static int 1533 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 1534 { 1535 struct mbuf *m_new = NULL; 1536 struct bge_rx_bd *r; 1537 void *buf = NULL; 1538 1539 if (m == NULL) { 1540 1541 /* Allocate the mbuf. */ 1542 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1543 if (m_new == NULL) 1544 return ENOBUFS; 1545 1546 /* Allocate the jumbo buffer */ 1547 buf = bge_jalloc(sc); 1548 if (buf == NULL) { 1549 m_freem(m_new); 1550 aprint_error_dev(sc->bge_dev, 1551 "jumbo allocation failed -- packet dropped!\n"); 1552 return ENOBUFS; 1553 } 1554 1555 /* Attach the buffer to the mbuf. 
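		 * MEXTADD() attaches the jumbo slot as external storage and
		 * registers bge_jfree() as its free routine, so the slot
		 * returns to bge_jfree_listhead when the mbuf is freed.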
		 */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
	    BGE_JLEN, BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGEF_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGEF_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
{
	int i;

	if (!(sc->bge_flags & BGEF_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			if (disable) {
				bus_dmamap_destroy(sc->bge_dmatag,
				    sc->bge_cdata.bge_rx_std_map[i]);
				sc->bge_cdata.bge_rx_std_map[i] = NULL;
			}
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGEF_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1677 } 1678 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1679 sizeof(struct bge_rx_bd)); 1680 } 1681 1682 sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID; 1683 } 1684 1685 static void 1686 bge_free_tx_ring(struct bge_softc *sc, bool disable) 1687 { 1688 int i; 1689 struct txdmamap_pool_entry *dma; 1690 1691 if (!(sc->bge_flags & BGEF_TXRING_VALID)) 1692 return; 1693 1694 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1695 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1696 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1697 sc->bge_cdata.bge_tx_chain[i] = NULL; 1698 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1699 link); 1700 sc->txdma[i] = 0; 1701 } 1702 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1703 sizeof(struct bge_tx_bd)); 1704 } 1705 1706 if (disable) { 1707 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1708 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1709 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1710 if (sc->bge_dma64) { 1711 bus_dmamap_destroy(sc->bge_dmatag32, 1712 dma->dmamap32); 1713 } 1714 free(dma, M_DEVBUF); 1715 } 1716 SLIST_INIT(&sc->txdma_list); 1717 } 1718 1719 sc->bge_flags &= ~BGEF_TXRING_VALID; 1720 } 1721 1722 static int 1723 bge_init_tx_ring(struct bge_softc *sc) 1724 { 1725 struct ifnet *ifp = &sc->ethercom.ec_if; 1726 int i; 1727 bus_dmamap_t dmamap, dmamap32; 1728 bus_size_t maxsegsz; 1729 struct txdmamap_pool_entry *dma; 1730 1731 if (sc->bge_flags & BGEF_TXRING_VALID) 1732 return 0; 1733 1734 sc->bge_txcnt = 0; 1735 sc->bge_tx_saved_considx = 0; 1736 1737 /* Initialize transmit producer index for host-memory send ring. */ 1738 sc->bge_tx_prodidx = 0; 1739 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1740 /* 5700 b2 errata */ 1741 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1742 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1743 1744 /* NIC-memory send ring not used; initialize to zero. 
*/ 1745 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1746 /* 5700 b2 errata */ 1747 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1748 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1749 1750 /* Limit DMA segment size for some chips */ 1751 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) && 1752 (ifp->if_mtu <= ETHERMTU)) 1753 maxsegsz = 2048; 1754 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 1755 maxsegsz = 4096; 1756 else 1757 maxsegsz = ETHER_MAX_LEN_JUMBO; 1758 1759 if (SLIST_FIRST(&sc->txdma_list) != NULL) 1760 goto alloc_done; 1761 1762 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1763 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1764 BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1765 &dmamap)) 1766 return ENOBUFS; 1767 if (dmamap == NULL) 1768 panic("dmamap NULL in bge_init_tx_ring"); 1769 if (sc->bge_dma64) { 1770 if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX, 1771 BGE_NTXSEG, maxsegsz, 0, 1772 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1773 &dmamap32)) { 1774 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1775 return ENOBUFS; 1776 } 1777 if (dmamap32 == NULL) 1778 panic("dmamap32 NULL in bge_init_tx_ring"); 1779 } else 1780 dmamap32 = dmamap; 1781 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1782 if (dma == NULL) { 1783 aprint_error_dev(sc->bge_dev, 1784 "can't alloc txdmamap_pool_entry\n"); 1785 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1786 if (sc->bge_dma64) 1787 bus_dmamap_destroy(sc->bge_dmatag32, dmamap32); 1788 return ENOMEM; 1789 } 1790 dma->dmamap = dmamap; 1791 dma->dmamap32 = dmamap32; 1792 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1793 } 1794 alloc_done: 1795 sc->bge_flags |= BGEF_TXRING_VALID; 1796 1797 return 0; 1798 } 1799 1800 static void 1801 bge_setmulti(struct bge_softc *sc) 1802 { 1803 struct ethercom *ec = &sc->ethercom; 1804 struct ifnet *ifp = &ec->ec_if; 1805 struct ether_multi *enm; 1806 struct ether_multistep step; 1807 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1808 uint32_t h; 1809 int i; 1810 1811 if (ifp->if_flags & IFF_PROMISC) 1812 goto allmulti; 1813 1814 /* Now program new ones. */ 1815 ETHER_LOCK(ec); 1816 ETHER_FIRST_MULTI(step, ec, enm); 1817 while (enm != NULL) { 1818 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1819 /* 1820 * We must listen to a range of multicast addresses. 1821 * For now, just accept all multicasts, rather than 1822 * trying to set only those filter bits needed to match 1823 * the range. (At this time, the only use of address 1824 * ranges is for IP multicast routing, for which the 1825 * range is big enough to require all bits set.) 1826 */ 1827 ETHER_UNLOCK(ec); 1828 goto allmulti; 1829 } 1830 1831 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1832 1833 /* Just want the 7 least-significant bits. 
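 * Bits 6:5 of the hash pick one of the four 32-bit BGE_MAR hash
 * registers and bits 4:0 pick the bit within it, which is exactly the
 * hashes[] update performed below.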
*/ 1834 h &= 0x7f; 1835 1836 hashes[(h & 0x60) >> 5] |= 1U << (h & 0x1F); 1837 ETHER_NEXT_MULTI(step, enm); 1838 } 1839 ETHER_UNLOCK(ec); 1840 1841 ifp->if_flags &= ~IFF_ALLMULTI; 1842 goto setit; 1843 1844 allmulti: 1845 ifp->if_flags |= IFF_ALLMULTI; 1846 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1847 1848 setit: 1849 for (i = 0; i < 4; i++) 1850 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1851 } 1852 1853 static void 1854 bge_sig_pre_reset(struct bge_softc *sc, int type) 1855 { 1856 1857 /* 1858 * Some chips don't like this so only do this if ASF is enabled 1859 */ 1860 if (sc->bge_asf_mode) 1861 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 1862 1863 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1864 switch (type) { 1865 case BGE_RESET_START: 1866 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1867 BGE_FW_DRV_STATE_START); 1868 break; 1869 case BGE_RESET_SHUTDOWN: 1870 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1871 BGE_FW_DRV_STATE_UNLOAD); 1872 break; 1873 case BGE_RESET_SUSPEND: 1874 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1875 BGE_FW_DRV_STATE_SUSPEND); 1876 break; 1877 } 1878 } 1879 1880 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 1881 bge_ape_driver_state_change(sc, type); 1882 } 1883 1884 static void 1885 bge_sig_post_reset(struct bge_softc *sc, int type) 1886 { 1887 1888 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1889 switch (type) { 1890 case BGE_RESET_START: 1891 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1892 BGE_FW_DRV_STATE_START_DONE); 1893 /* START DONE */ 1894 break; 1895 case BGE_RESET_SHUTDOWN: 1896 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1897 BGE_FW_DRV_STATE_UNLOAD_DONE); 1898 break; 1899 } 1900 } 1901 1902 if (type == BGE_RESET_SHUTDOWN) 1903 bge_ape_driver_state_change(sc, type); 1904 } 1905 1906 static void 1907 bge_sig_legacy(struct bge_softc *sc, int type) 1908 { 1909 1910 if (sc->bge_asf_mode) { 1911 switch (type) { 1912 case BGE_RESET_START: 1913 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1914 BGE_FW_DRV_STATE_START); 1915 break; 1916 case BGE_RESET_SHUTDOWN: 1917 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1918 BGE_FW_DRV_STATE_UNLOAD); 1919 break; 1920 } 1921 } 1922 } 1923 1924 static void 1925 bge_wait_for_event_ack(struct bge_softc *sc) 1926 { 1927 int i; 1928 1929 /* wait up to 2500usec */ 1930 for (i = 0; i < 250; i++) { 1931 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 1932 BGE_RX_CPU_DRV_EVENT)) 1933 break; 1934 DELAY(10); 1935 } 1936 } 1937 1938 static void 1939 bge_stop_fw(struct bge_softc *sc) 1940 { 1941 1942 if (sc->bge_asf_mode) { 1943 bge_wait_for_event_ack(sc); 1944 1945 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 1946 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 1947 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 1948 1949 bge_wait_for_event_ack(sc); 1950 } 1951 } 1952 1953 static int 1954 bge_poll_fw(struct bge_softc *sc) 1955 { 1956 uint32_t val; 1957 int i; 1958 1959 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1960 for (i = 0; i < BGE_TIMEOUT; i++) { 1961 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 1962 if (val & BGE_VCPU_STATUS_INIT_DONE) 1963 break; 1964 DELAY(100); 1965 } 1966 if (i >= BGE_TIMEOUT) { 1967 aprint_error_dev(sc->bge_dev, "reset timed out\n"); 1968 return -1; 1969 } 1970 } else { 1971 /* 1972 * Poll the value location we just wrote until 1973 * we see the 1's complement of the magic number. 1974 * This indicates that the firmware initialization 1975 * is complete. 1976 * XXX 1000ms for Flash and 10000ms for SEEPROM. 
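 * The loop below re-reads BGE_SRAM_FW_MB every 10us; a timeout is only
 * treated as fatal when the device really has a SEEPROM, i.e. when
 * BGEF_NO_EEPROM is not set.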
1977 */ 1978 for (i = 0; i < BGE_TIMEOUT; i++) { 1979 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); 1980 if (val == ~BGE_SRAM_FW_MB_MAGIC) 1981 break; 1982 DELAY(10); 1983 } 1984 1985 if ((i >= BGE_TIMEOUT) 1986 && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) { 1987 aprint_error_dev(sc->bge_dev, 1988 "firmware handshake timed out, val = %x\n", val); 1989 return -1; 1990 } 1991 } 1992 1993 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { 1994 /* tg3 says we have to wait extra time */ 1995 delay(10 * 1000); 1996 } 1997 1998 return 0; 1999 } 2000 2001 int 2002 bge_phy_addr(struct bge_softc *sc) 2003 { 2004 struct pci_attach_args *pa = &(sc->bge_pa); 2005 int phy_addr = 1; 2006 2007 /* 2008 * PHY address mapping for various devices. 2009 * 2010 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2011 * ---------+-------+-------+-------+-------+ 2012 * BCM57XX | 1 | X | X | X | 2013 * BCM5704 | 1 | X | 1 | X | 2014 * BCM5717 | 1 | 8 | 2 | 9 | 2015 * BCM5719 | 1 | 8 | 2 | 9 | 2016 * BCM5720 | 1 | 8 | 2 | 9 | 2017 * 2018 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2019 * ---------+-------+-------+-------+-------+ 2020 * BCM57XX | X | X | X | X | 2021 * BCM5704 | X | X | X | X | 2022 * BCM5717 | X | X | X | X | 2023 * BCM5719 | 3 | 10 | 4 | 11 | 2024 * BCM5720 | X | X | X | X | 2025 * 2026 * Other addresses may respond but they are not 2027 * IEEE compliant PHYs and should be ignored. 2028 */ 2029 switch (BGE_ASICREV(sc->bge_chipid)) { 2030 case BGE_ASICREV_BCM5717: 2031 case BGE_ASICREV_BCM5719: 2032 case BGE_ASICREV_BCM5720: 2033 phy_addr = pa->pa_function; 2034 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { 2035 phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) & 2036 BGE_SGDIGSTS_IS_SERDES) ? 8 : 1; 2037 } else { 2038 phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2039 BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1; 2040 } 2041 } 2042 2043 return phy_addr; 2044 } 2045 2046 /* 2047 * Do endian, PCI and DMA initialization. Also check the on-board ROM 2048 * self-test results. 2049 */ 2050 static int 2051 bge_chipinit(struct bge_softc *sc) 2052 { 2053 uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg; 2054 int i; 2055 2056 /* Set endianness before we access any non-PCI registers. */ 2057 misc_ctl = BGE_INIT; 2058 if (sc->bge_flags & BGEF_TAGGED_STATUS) 2059 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; 2060 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2061 misc_ctl); 2062 2063 /* 2064 * Clear the MAC statistics block in the NIC's 2065 * internal memory. 
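 * (The two loops below walk NIC-internal SRAM through the PCI memory
 * window with BGE_MEMWIN_WRITE, clearing the statistics block and the
 * status block one 32-bit word at a time.)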
2066 */ 2067 for (i = BGE_STATS_BLOCK; 2068 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 2069 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 2070 2071 for (i = BGE_STATUS_BLOCK; 2072 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 2073 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 2074 2075 /* 5717 workaround from tg3 */ 2076 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) { 2077 /* Save */ 2078 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2079 2080 /* Temporary modify MODE_CTL to control TLP */ 2081 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2082 CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1); 2083 2084 /* Control TLP */ 2085 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2086 BGE_TLP_PHYCTL1); 2087 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1, 2088 reg | BGE_TLP_PHYCTL1_EN_L1PLLPD); 2089 2090 /* Restore */ 2091 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2092 } 2093 2094 if (BGE_IS_57765_FAMILY(sc)) { 2095 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { 2096 /* Save */ 2097 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2098 2099 /* Temporary modify MODE_CTL to control TLP */ 2100 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2101 CSR_WRITE_4(sc, BGE_MODE_CTL, 2102 reg | BGE_MODECTL_PCIE_TLPADDR1); 2103 2104 /* Control TLP */ 2105 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2106 BGE_TLP_PHYCTL5); 2107 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5, 2108 reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ); 2109 2110 /* Restore */ 2111 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2112 } 2113 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) { 2114 /* 2115 * For the 57766 and non Ax versions of 57765, bootcode 2116 * needs to setup the PCIE Fast Training Sequence (FTS) 2117 * value to prevent transmit hangs. 2118 */ 2119 reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); 2120 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, 2121 reg | BGE_CPMU_PADRNG_CTL_RDIV2); 2122 2123 /* Save */ 2124 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2125 2126 /* Temporary modify MODE_CTL to control TLP */ 2127 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2128 CSR_WRITE_4(sc, BGE_MODE_CTL, 2129 reg | BGE_MODECTL_PCIE_TLPADDR0); 2130 2131 /* Control TLP */ 2132 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2133 BGE_TLP_FTSMAX); 2134 reg &= ~BGE_TLP_FTSMAX_MSK; 2135 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX, 2136 reg | BGE_TLP_FTSMAX_VAL); 2137 2138 /* Restore */ 2139 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2140 } 2141 2142 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 2143 reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 2144 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 2145 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); 2146 } 2147 2148 /* Set up the PCI DMA control register. */ 2149 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD; 2150 if (sc->bge_flags & BGEF_PCIE) { 2151 /* Read watermark not used, 128 bytes for write. */ 2152 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 2153 device_xname(sc->bge_dev))); 2154 if (sc->bge_mps >= 256) 2155 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 2156 else 2157 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2158 } else if (sc->bge_flags & BGEF_PCIX) { 2159 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", 2160 device_xname(sc->bge_dev))); 2161 /* PCI-X bus */ 2162 if (BGE_IS_5714_FAMILY(sc)) { 2163 /* 256 bytes for read and write. 
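 * Note the RD_WAT/WR_WAT watermark fields take encoded codes rather
 * than raw byte counts; the byte figures quoted in these comments are
 * what the chosen codes select on the chip being configured.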
*/ 2164 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 2165 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 2166 2167 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 2168 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 2169 else 2170 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 2171 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 2172 /* 2173 * In the BCM5703, the DMA read watermark should 2174 * be set to less than or equal to the maximum 2175 * memory read byte count of the PCI-X command 2176 * register. 2177 */ 2178 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | 2179 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2180 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 2181 /* 1536 bytes for read, 384 bytes for write. */ 2182 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 2183 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2184 } else { 2185 /* 384 bytes for read and write. */ 2186 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 2187 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 2188 (0x0F); 2189 } 2190 2191 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2192 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 2193 uint32_t tmp; 2194 2195 /* Set ONEDMA_ATONCE for hardware workaround. */ 2196 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 2197 if (tmp == 6 || tmp == 7) 2198 dma_rw_ctl |= 2199 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 2200 2201 /* Set PCI-X DMA write workaround. */ 2202 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 2203 } 2204 } else { 2205 /* Conventional PCI bus: 256 bytes for read and write. */ 2206 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 2207 device_xname(sc->bge_dev))); 2208 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 2209 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 2210 2211 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 && 2212 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750) 2213 dma_rw_ctl |= 0x0F; 2214 } 2215 2216 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2217 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) 2218 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 2219 BGE_PCIDMARWCTL_ASRT_ALL_BE; 2220 2221 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2222 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2223 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 2224 2225 if (BGE_IS_57765_PLUS(sc)) { 2226 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 2227 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) 2228 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 2229 2230 /* 2231 * Enable HW workaround for controllers that misinterpret 2232 * a status tag update and leave interrupts permanently 2233 * disabled. 2234 */ 2235 if (!BGE_IS_57765_FAMILY(sc) && 2236 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2237 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762) 2238 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 2239 } 2240 2241 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 2242 dma_rw_ctl); 2243 2244 /* 2245 * Set up general mode register. 2246 */ 2247 mode_ctl = BGE_DMA_SWAP_OPTIONS; 2248 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2249 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2250 /* Retain Host-2-BMC settings written by APE firmware. 
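 * The byteswap/word-swap and B2HRX/HTX2B enable bits already set up by
 * the APE are read back from BGE_MODE_CTL and carried over into the new
 * mode word, so the BMC path keeps working across re-initialization.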
*/ 2251 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & 2252 (BGE_MODECTL_BYTESWAP_B2HRX_DATA | 2253 BGE_MODECTL_WORDSWAP_B2HRX_DATA | 2254 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); 2255 } 2256 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 2257 BGE_MODECTL_TX_NO_PHDR_CSUM; 2258 2259 /* 2260 * BCM5701 B5 have a bug causing data corruption when using 2261 * 64-bit DMA reads, which can be terminated early and then 2262 * completed later as 32-bit accesses, in combination with 2263 * certain bridges. 2264 */ 2265 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 2266 sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 2267 mode_ctl |= BGE_MODECTL_FORCE_PCI32; 2268 2269 /* 2270 * Tell the firmware the driver is running 2271 */ 2272 if (sc->bge_asf_mode & ASF_STACKUP) 2273 mode_ctl |= BGE_MODECTL_STACKUP; 2274 2275 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2276 2277 /* 2278 * Disable memory write invalidate. Apparently it is not supported 2279 * properly by these devices. 2280 */ 2281 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, 2282 PCI_COMMAND_INVALIDATE_ENABLE); 2283 2284 #ifdef __brokenalpha__ 2285 /* 2286 * Must insure that we do not cross an 8K (bytes) boundary 2287 * for DMA reads. Our highest limit is 1K bytes. This is a 2288 * restriction on some ALPHA platforms with early revision 2289 * 21174 PCI chipsets, such as the AlphaPC 164lx 2290 */ 2291 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 2292 #endif 2293 2294 /* Set the timer prescaler (always 66MHz) */ 2295 CSR_WRITE_4_FLUSH(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 2296 2297 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2298 DELAY(40); /* XXX */ 2299 2300 /* Put PHY into ready state */ 2301 BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 2302 DELAY(40); 2303 } 2304 2305 return 0; 2306 } 2307 2308 static int 2309 bge_blockinit(struct bge_softc *sc) 2310 { 2311 volatile struct bge_rcb *rcb; 2312 bus_size_t rcb_addr; 2313 struct ifnet *ifp = &sc->ethercom.ec_if; 2314 bge_hostaddr taddr; 2315 uint32_t dmactl, rdmareg, mimode, val; 2316 int i, limit; 2317 2318 /* 2319 * Initialize the memory window pointer register so that 2320 * we can access the first 32K of internal NIC RAM. This will 2321 * allow us to set up the TX send ring RCBs and the RX return 2322 * ring RCBs, plus other things which live in NIC memory. 2323 */ 2324 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 2325 2326 if (!BGE_IS_5705_PLUS(sc)) { 2327 /* 57XX step 33 */ 2328 /* Configure mbuf memory pool */ 2329 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 2330 2331 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2332 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 2333 else 2334 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 2335 2336 /* 57XX step 34 */ 2337 /* Configure DMA resource pool */ 2338 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 2339 BGE_DMA_DESCRIPTORS); 2340 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 2341 } 2342 2343 /* 5718 step 11, 57XX step 35 */ 2344 /* 2345 * Configure mbuf pool watermarks. New broadcom docs strongly 2346 * recommend these. 
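 * The values written below depend both on the chip family and on
 * whether the MTU is above ETHERMTU; 5717-plus parts get deeper
 * MACRX_LOWAT/HIWAT settings when jumbo frames are in use.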
2347 */ 2348 if (BGE_IS_5717_PLUS(sc)) { 2349 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2350 if (ifp->if_mtu > ETHERMTU) { 2351 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 2352 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 2353 } else { 2354 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 2355 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 2356 } 2357 } else if (BGE_IS_5705_PLUS(sc)) { 2358 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2359 2360 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2361 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 2362 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 2363 } else { 2364 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 2365 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2366 } 2367 } else { 2368 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 2369 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 2370 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2371 } 2372 2373 /* 57XX step 36 */ 2374 /* Configure DMA resource watermarks */ 2375 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 2376 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 2377 2378 /* 5718 step 13, 57XX step 38 */ 2379 /* Enable buffer manager */ 2380 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN; 2381 /* 2382 * Change the arbitration algorithm of TXMBUF read request to 2383 * round-robin instead of priority based for BCM5719. When 2384 * TXFIFO is almost empty, RDMA will hold its request until 2385 * TXFIFO is not almost empty. 2386 */ 2387 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2388 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 2389 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2390 sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2391 sc->bge_chipid == BGE_CHIPID_BCM5720_A0) 2392 val |= BGE_BMANMODE_LOMBUF_ATTN; 2393 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 2394 2395 /* 57XX step 39 */ 2396 /* Poll for buffer manager start indication */ 2397 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2398 DELAY(10); 2399 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2400 break; 2401 } 2402 2403 if (i == BGE_TIMEOUT * 2) { 2404 aprint_error_dev(sc->bge_dev, 2405 "buffer manager failed to start\n"); 2406 return ENXIO; 2407 } 2408 2409 /* 57XX step 40 */ 2410 /* Enable flow-through queues */ 2411 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2412 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2413 2414 /* Wait until queue initialization is complete */ 2415 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2416 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2417 break; 2418 DELAY(10); 2419 } 2420 2421 if (i == BGE_TIMEOUT * 2) { 2422 aprint_error_dev(sc->bge_dev, 2423 "flow-through queue init failed\n"); 2424 return ENXIO; 2425 } 2426 2427 /* 2428 * Summary of rings supported by the controller: 2429 * 2430 * Standard Receive Producer Ring 2431 * - This ring is used to feed receive buffers for "standard" 2432 * sized frames (typically 1536 bytes) to the controller. 2433 * 2434 * Jumbo Receive Producer Ring 2435 * - This ring is used to feed receive buffers for jumbo sized 2436 * frames (i.e. anything bigger than the "standard" frames) 2437 * to the controller. 2438 * 2439 * Mini Receive Producer Ring 2440 * - This ring is used to feed receive buffers for "mini" 2441 * sized frames to the controller. 2442 * - This feature required external memory for the controller 2443 * but was never used in a production system. Should always 2444 * be disabled. 
2445 * 2446 * Receive Return Ring 2447 * - After the controller has placed an incoming frame into a 2448 * receive buffer that buffer is moved into a receive return 2449 * ring. The driver is then responsible to passing the 2450 * buffer up to the stack. Many versions of the controller 2451 * support multiple RR rings. 2452 * 2453 * Send Ring 2454 * - This ring is used for outgoing frames. Many versions of 2455 * the controller support multiple send rings. 2456 */ 2457 2458 /* 5718 step 15, 57XX step 41 */ 2459 /* Initialize the standard RX ring control block */ 2460 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2461 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2462 /* 5718 step 16 */ 2463 if (BGE_IS_57765_PLUS(sc)) { 2464 /* 2465 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 2466 * Bits 15-2 : Maximum RX frame size 2467 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2468 * Bit 0 : Reserved 2469 */ 2470 rcb->bge_maxlen_flags = 2471 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2472 } else if (BGE_IS_5705_PLUS(sc)) { 2473 /* 2474 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2475 * Bits 15-2 : Reserved (should be 0) 2476 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2477 * Bit 0 : Reserved 2478 */ 2479 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2480 } else { 2481 /* 2482 * Ring size is always XXX entries 2483 * Bits 31-16: Maximum RX frame size 2484 * Bits 15-2 : Reserved (should be 0) 2485 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2486 * Bit 0 : Reserved 2487 */ 2488 rcb->bge_maxlen_flags = 2489 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2490 } 2491 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2492 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2493 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2494 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2495 else 2496 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2497 /* Write the standard receive producer ring control block. */ 2498 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2499 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2500 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2501 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2502 2503 /* Reset the standard receive producer ring producer index. */ 2504 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2505 2506 /* 57XX step 42 */ 2507 /* 2508 * Initialize the jumbo RX ring control block 2509 * We set the 'ring disabled' bit in the flags 2510 * field until we're actually ready to start 2511 * using this ring (i.e. once we set the MTU 2512 * high enough to require it). 2513 */ 2514 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2515 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2516 BGE_HOSTADDR(rcb->bge_hostaddr, 2517 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2518 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2519 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2520 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2521 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2522 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2523 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2524 else 2525 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2526 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2527 rcb->bge_hostaddr.bge_addr_hi); 2528 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2529 rcb->bge_hostaddr.bge_addr_lo); 2530 /* Program the jumbo receive producer ring RCB parameters. 
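 * The ring is created with BGE_RCB_FLAG_RING_DISABLED set; it is only
 * enabled later, from bge_init_rx_ring_jumbo(), once the MTU actually
 * requires jumbo buffers.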
 */
2531		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2532		    rcb->bge_maxlen_flags);
2533		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2534		/* Reset the jumbo receive producer ring producer index. */
2535		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2536	}
2537
2538	/* 57XX step 43 */
2539	/* Disable the mini receive producer ring RCB. */
2540	if (BGE_IS_5700_FAMILY(sc)) {
2541		/* Set up dummy disabled mini ring RCB */
2542		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2543		rcb->bge_maxlen_flags =
2544		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2545		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2546		    rcb->bge_maxlen_flags);
2547		/* Reset the mini receive producer ring producer index. */
2548		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2549
2550		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2551		    offsetof(struct bge_ring_data, bge_info),
2552		    sizeof (struct bge_gib),
2553		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2554	}
2555
2556	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2557	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2558		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2559		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2560		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2561			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2562			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2563	}
2564	/* 5718 step 14, 57XX step 44 */
2565	/*
2566	 * The BD ring replenish thresholds control how often the
2567	 * hardware fetches new BD's from the producer rings in host
2568	 * memory. Setting the value too low on a busy system can
2569	 * starve the hardware and reduce the throughput.
2570	 *
2571	 * Set the BD ring replenish thresholds. The recommended
2572	 * values are 1/8th the number of descriptors allocated to
2573	 * each ring, but since we try to avoid filling the entire
2574	 * ring we set these to the minimal value of 8. This needs to
2575	 * be done on several of the supported chip revisions anyway,
2576	 * to work around HW bugs.
2577	 */
2578	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2579	if (BGE_IS_JUMBO_CAPABLE(sc))
2580		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2581
2582	/* 5718 step 18 */
2583	if (BGE_IS_5717_PLUS(sc)) {
2584		CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2585		CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2586	}
2587
2588	/* 57XX step 45 */
2589	/*
2590	 * Disable all send rings by setting the 'ring disabled' bit
2591	 * in the flags field of all the TX send ring control blocks,
2592	 * located in NIC memory.
2593	 */
2594	if (BGE_IS_5700_FAMILY(sc)) {
2595		/* 5700 to 5704 had 16 send rings.
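 * The loop below writes a disabled RCB (and a zero NIC address) for
 * every possible send ring in NIC memory, advancing rcb_addr by
 * sizeof(struct bge_rcb) each iteration.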
*/ 2596 limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2597 } else if (BGE_IS_5717_PLUS(sc)) { 2598 limit = BGE_TX_RINGS_5717_MAX; 2599 } else if (BGE_IS_57765_FAMILY(sc) || 2600 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2601 limit = BGE_TX_RINGS_57765_MAX; 2602 } else 2603 limit = 1; 2604 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2605 for (i = 0; i < limit; i++) { 2606 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2607 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2608 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2609 rcb_addr += sizeof(struct bge_rcb); 2610 } 2611 2612 /* 57XX step 46 and 47 */ 2613 /* Configure send ring RCB 0 (we use only the first ring) */ 2614 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2615 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2616 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2617 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2618 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2619 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2620 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2621 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); 2622 else 2623 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2624 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2625 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2626 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2627 2628 /* 57XX step 48 */ 2629 /* 2630 * Disable all receive return rings by setting the 2631 * 'ring diabled' bit in the flags field of all the receive 2632 * return ring control blocks, located in NIC memory. 2633 */ 2634 if (BGE_IS_5717_PLUS(sc)) { 2635 /* Should be 17, use 16 until we get an SRAM map. */ 2636 limit = 16; 2637 } else if (BGE_IS_5700_FAMILY(sc)) 2638 limit = BGE_RX_RINGS_MAX; 2639 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2640 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 || 2641 BGE_IS_57765_FAMILY(sc)) 2642 limit = 4; 2643 else 2644 limit = 1; 2645 /* Disable all receive return rings */ 2646 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2647 for (i = 0; i < limit; i++) { 2648 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2649 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2650 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2651 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2652 BGE_RCB_FLAG_RING_DISABLED)); 2653 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2654 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2655 (i * (sizeof(uint64_t))), 0); 2656 rcb_addr += sizeof(struct bge_rcb); 2657 } 2658 2659 /* 57XX step 49 */ 2660 /* 2661 * Set up receive return ring 0. Note that the NIC address 2662 * for RX return rings is 0x0. The return rings live entirely 2663 * within the host, so the nicaddr field in the RCB isn't used. 
2664	 */
2665	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2666	BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2667	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2668	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2669	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2670	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2671	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2672
2673	/* 5718 step 24, 57XX step 53 */
2674	/* Set random backoff seed for TX */
2675	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2676	    (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2677	    CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2678	    CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
2679	    BGE_TX_BACKOFF_SEED_MASK);
2680
2681	/* 5718 step 26, 57XX step 55 */
2682	/* Set inter-packet gap */
2683	val = 0x2620;
2684	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
2685	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
2686		val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2687		    (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2688	CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2689
2690	/* 5718 step 27, 57XX step 56 */
2691	/*
2692	 * Specify which ring to use for packets that don't match
2693	 * any RX rules.
2694	 */
2695	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2696
2697	/* 5718 step 28, 57XX step 57 */
2698	/*
2699	 * Configure number of RX lists. One interrupt distribution
2700	 * list, sixteen active lists, one bad frames class.
2701	 */
2702	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2703
2704	/* 5718 step 29, 57XX step 58 */
2705	/* Initialize RX list placement stats mask. */
2706	if (BGE_IS_575X_PLUS(sc)) {
2707		val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK);
2708		val &= ~BGE_RXLPSTATCONTROL_DACK_FIX;
2709		CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val);
2710	} else
2711		CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2712
2713	/* 5718 step 30, 57XX step 59 */
2714	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2715
2716	/* 5718 step 33, 57XX step 62 */
2717	/* Disable host coalescing until we get it set up */
2718	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2719
2720	/* 5718 step 34, 57XX step 63 */
2721	/* Poll to make sure it's shut down.
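 * The poll below retries up to BGE_TIMEOUT * 2 times, 10us apart,
 * waiting for BGE_HCCMODE_ENABLE to read back clear before the
 * coalescing parameters are programmed.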
*/ 2722 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2723 DELAY(10); 2724 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2725 break; 2726 } 2727 2728 if (i == BGE_TIMEOUT * 2) { 2729 aprint_error_dev(sc->bge_dev, 2730 "host coalescing engine failed to idle\n"); 2731 return ENXIO; 2732 } 2733 2734 /* 5718 step 35, 36, 37 */ 2735 /* Set up host coalescing defaults */ 2736 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2737 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2738 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2739 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2740 if (!(BGE_IS_5705_PLUS(sc))) { 2741 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2742 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2743 } 2744 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2745 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2746 2747 /* Set up address of statistics block */ 2748 if (BGE_IS_5700_FAMILY(sc)) { 2749 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2750 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2751 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2752 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2753 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2754 } 2755 2756 /* 5718 step 38 */ 2757 /* Set up address of status block */ 2758 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2759 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2760 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2761 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2762 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2763 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2764 2765 /* Set up status block size. */ 2766 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2767 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2768 val = BGE_STATBLKSZ_FULL; 2769 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2770 } else { 2771 val = BGE_STATBLKSZ_32BYTE; 2772 bzero(&sc->bge_rdata->bge_status_block, 32); 2773 } 2774 2775 /* 5718 step 39, 57XX step 73 */ 2776 /* Turn on host coalescing state machine */ 2777 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2778 2779 /* 5718 step 40, 57XX step 74 */ 2780 /* Turn on RX BD completion state machine and enable attentions */ 2781 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2782 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2783 2784 /* 5718 step 41, 57XX step 75 */ 2785 /* Turn on RX list placement state machine */ 2786 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2787 2788 /* 57XX step 76 */ 2789 /* Turn on RX list selector state machine. */ 2790 if (!(BGE_IS_5705_PLUS(sc))) 2791 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2792 2793 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2794 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2795 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2796 BGE_MACMODE_FRMHDR_DMA_ENB; 2797 2798 if (sc->bge_flags & BGEF_FIBER_TBI) 2799 val |= BGE_PORTMODE_TBI; 2800 else if (sc->bge_flags & BGEF_FIBER_MII) 2801 val |= BGE_PORTMODE_GMII; 2802 else 2803 val |= BGE_PORTMODE_MII; 2804 2805 /* 5718 step 42 and 43, 57XX step 77 and 78 */ 2806 /* Allow APE to send/receive frames. 
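 * APE RX/TX pass-through is only enabled when management firmware is
 * actually running (BGE_MFW_ON_APE), as checked just below.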
*/ 2807 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2808 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2809 2810 /* Turn on DMA, clear stats */ 2811 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 2812 /* 5718 step 44 */ 2813 DELAY(40); 2814 2815 /* 5718 step 45, 57XX step 79 */ 2816 /* Set misc. local control, enable interrupts on attentions */ 2817 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2818 if (BGE_IS_5717_PLUS(sc)) { 2819 CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */ 2820 /* 5718 step 46 */ 2821 DELAY(100); 2822 } 2823 2824 /* 57XX step 81 */ 2825 /* Turn on DMA completion state machine */ 2826 if (!(BGE_IS_5705_PLUS(sc))) 2827 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2828 2829 /* 5718 step 47, 57XX step 82 */ 2830 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 2831 2832 /* 5718 step 48 */ 2833 /* Enable host coalescing bug fix. */ 2834 if (BGE_IS_5755_PLUS(sc)) 2835 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2836 2837 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 2838 val |= BGE_WDMAMODE_BURST_ALL_DATA; 2839 2840 /* Turn on write DMA state machine */ 2841 CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val); 2842 /* 5718 step 49 */ 2843 DELAY(40); 2844 2845 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2846 2847 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) 2848 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2849 2850 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2851 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2852 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2853 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2854 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2855 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2856 2857 if (sc->bge_flags & BGEF_PCIE) 2858 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2859 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 2860 if (ifp->if_mtu <= ETHERMTU) 2861 val |= BGE_RDMAMODE_JMB_2K_MMRR; 2862 } 2863 if (sc->bge_flags & BGEF_TSO) { 2864 val |= BGE_RDMAMODE_TSO4_ENABLE; 2865 if (BGE_IS_5717_PLUS(sc)) 2866 val |= BGE_RDMAMODE_TSO6_ENABLE; 2867 } 2868 2869 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2870 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2871 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2872 BGE_RDMAMODE_H2BNC_VLAN_DET; 2873 /* 2874 * Allow multiple outstanding read requests from 2875 * non-LSO read DMA engine. 2876 */ 2877 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2878 } 2879 2880 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2881 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2882 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2883 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || 2884 BGE_IS_57765_PLUS(sc)) { 2885 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2886 rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2887 else 2888 rdmareg = BGE_RDMA_RSRVCTRL; 2889 dmactl = CSR_READ_4(sc, rdmareg); 2890 /* 2891 * Adjust tx margin to prevent TX data corruption and 2892 * fix internal FIFO overflow. 2893 */ 2894 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2895 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2896 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2897 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2898 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2899 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2900 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2901 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2902 } 2903 /* 2904 * Enable fix for read DMA FIFO overruns. 2905 * The fix is to limit the number of RX BDs 2906 * the hardware would fetch at a fime. 
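 * (The limit is applied by setting BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX in
 * the reserved-control register written just below.)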
2907 */ 2908 CSR_WRITE_4(sc, rdmareg, dmactl | 2909 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2910 } 2911 2912 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { 2913 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2914 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2915 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2916 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2917 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2918 /* 2919 * Allow 4KB burst length reads for non-LSO frames. 2920 * Enable 512B burst length reads for buffer descriptors. 2921 */ 2922 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2923 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2924 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2925 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2926 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2927 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2928 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2929 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2930 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2931 } 2932 /* Turn on read DMA state machine */ 2933 CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val); 2934 /* 5718 step 52 */ 2935 delay(40); 2936 2937 if (sc->bge_flags & BGEF_RDMA_BUG) { 2938 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2939 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2940 if ((val & 0xFFFF) > BGE_FRAMELEN) 2941 break; 2942 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) 2943 break; 2944 } 2945 if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2946 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2947 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2948 val |= BGE_RDMA_TX_LENGTH_WA_5719; 2949 else 2950 val |= BGE_RDMA_TX_LENGTH_WA_5720; 2951 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2952 } 2953 } 2954 2955 /* 5718 step 56, 57XX step 84 */ 2956 /* Turn on RX data completion state machine */ 2957 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2958 2959 /* Turn on RX data and RX BD initiator state machine */ 2960 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2961 2962 /* 57XX step 85 */ 2963 /* Turn on Mbuf cluster free state machine */ 2964 if (!BGE_IS_5705_PLUS(sc)) 2965 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2966 2967 /* 5718 step 57, 57XX step 86 */ 2968 /* Turn on send data completion state machine */ 2969 val = BGE_SDCMODE_ENABLE; 2970 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 2971 val |= BGE_SDCMODE_CDELAY; 2972 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2973 2974 /* 5718 step 58 */ 2975 /* Turn on send BD completion state machine */ 2976 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2977 2978 /* 57XX step 88 */ 2979 /* Turn on RX BD initiator state machine */ 2980 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2981 2982 /* 5718 step 60, 57XX step 90 */ 2983 /* Turn on send data initiator state machine */ 2984 if (sc->bge_flags & BGEF_TSO) { 2985 /* XXX: magic value from Linux driver */ 2986 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 2987 BGE_SDIMODE_HW_LSO_PRE_DMA); 2988 } else 2989 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2990 2991 /* 5718 step 61, 57XX step 91 */ 2992 /* Turn on send BD initiator state machine */ 2993 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2994 2995 /* 5718 step 62, 57XX step 92 */ 2996 /* Turn on send BD selector state machine */ 2997 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2998 2999 /* 5718 step 31, 57XX step 60 */ 3000 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 3001 /* 5718 step 32, 57XX step 61 */ 3002 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 3003 BGE_SDISTATSCTL_ENABLE | 
BGE_SDISTATSCTL_FASTER); 3004 3005 /* ack/clear link change events */ 3006 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3007 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3008 BGE_MACSTAT_LINK_CHANGED); 3009 CSR_WRITE_4(sc, BGE_MI_STS, 0); 3010 3011 /* 3012 * Enable attention when the link has changed state for 3013 * devices that use auto polling. 3014 */ 3015 if (sc->bge_flags & BGEF_FIBER_TBI) { 3016 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 3017 } else { 3018 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3019 mimode = BGE_MIMODE_500KHZ_CONST; 3020 else 3021 mimode = BGE_MIMODE_BASE; 3022 /* 5718 step 68. 5718 step 69 (optionally). */ 3023 if (BGE_IS_5700_FAMILY(sc) || 3024 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) { 3025 mimode |= BGE_MIMODE_AUTOPOLL; 3026 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 3027 } 3028 mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3029 CSR_WRITE_4(sc, BGE_MI_MODE, mimode); 3030 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 3031 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3032 BGE_EVTENB_MI_INTERRUPT); 3033 } 3034 3035 /* 3036 * Clear any pending link state attention. 3037 * Otherwise some link state change events may be lost until attention 3038 * is cleared by bge_intr() -> bge_link_upd() sequence. 3039 * It's not necessary on newer BCM chips - perhaps enabling link 3040 * state change attentions implies clearing pending attention. 3041 */ 3042 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3043 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3044 BGE_MACSTAT_LINK_CHANGED); 3045 3046 /* Enable link state change attentions. */ 3047 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 3048 3049 return 0; 3050 } 3051 3052 static const struct bge_revision * 3053 bge_lookup_rev(uint32_t chipid) 3054 { 3055 const struct bge_revision *br; 3056 3057 for (br = bge_revisions; br->br_name != NULL; br++) { 3058 if (br->br_chipid == chipid) 3059 return br; 3060 } 3061 3062 for (br = bge_majorrevs; br->br_name != NULL; br++) { 3063 if (br->br_chipid == BGE_ASICREV(chipid)) 3064 return br; 3065 } 3066 3067 return NULL; 3068 } 3069 3070 static const struct bge_product * 3071 bge_lookup(const struct pci_attach_args *pa) 3072 { 3073 const struct bge_product *bp; 3074 3075 for (bp = bge_products; bp->bp_name != NULL; bp++) { 3076 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 3077 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 3078 return bp; 3079 } 3080 3081 return NULL; 3082 } 3083 3084 static uint32_t 3085 bge_chipid(const struct pci_attach_args *pa) 3086 { 3087 uint32_t id; 3088 3089 id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) 3090 >> BGE_PCIMISCCTL_ASICREV_SHIFT; 3091 3092 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { 3093 switch (PCI_PRODUCT(pa->pa_id)) { 3094 case PCI_PRODUCT_BROADCOM_BCM5717: 3095 case PCI_PRODUCT_BROADCOM_BCM5718: 3096 case PCI_PRODUCT_BROADCOM_BCM5719: 3097 case PCI_PRODUCT_BROADCOM_BCM5720: 3098 case PCI_PRODUCT_BROADCOM_BCM5725: 3099 case PCI_PRODUCT_BROADCOM_BCM5727: 3100 case PCI_PRODUCT_BROADCOM_BCM5762: 3101 case PCI_PRODUCT_BROADCOM_BCM57764: 3102 case PCI_PRODUCT_BROADCOM_BCM57767: 3103 case PCI_PRODUCT_BROADCOM_BCM57787: 3104 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3105 BGE_PCI_GEN2_PRODID_ASICREV); 3106 break; 3107 case PCI_PRODUCT_BROADCOM_BCM57761: 3108 case PCI_PRODUCT_BROADCOM_BCM57762: 3109 case PCI_PRODUCT_BROADCOM_BCM57765: 3110 case PCI_PRODUCT_BROADCOM_BCM57766: 3111 case PCI_PRODUCT_BROADCOM_BCM57781: 3112 case PCI_PRODUCT_BROADCOM_BCM57782: 3113 case 
PCI_PRODUCT_BROADCOM_BCM57785: 3114 case PCI_PRODUCT_BROADCOM_BCM57786: 3115 case PCI_PRODUCT_BROADCOM_BCM57791: 3116 case PCI_PRODUCT_BROADCOM_BCM57795: 3117 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3118 BGE_PCI_GEN15_PRODID_ASICREV); 3119 break; 3120 default: 3121 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3122 BGE_PCI_PRODID_ASICREV); 3123 break; 3124 } 3125 } 3126 3127 return id; 3128 } 3129 3130 /* 3131 * Return true if MSI can be used with this device. 3132 */ 3133 static int 3134 bge_can_use_msi(struct bge_softc *sc) 3135 { 3136 int can_use_msi = 0; 3137 3138 switch (BGE_ASICREV(sc->bge_chipid)) { 3139 case BGE_ASICREV_BCM5714_A0: 3140 case BGE_ASICREV_BCM5714: 3141 /* 3142 * Apparently, MSI doesn't work when these chips are 3143 * configured in single-port mode. 3144 */ 3145 break; 3146 case BGE_ASICREV_BCM5750: 3147 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX && 3148 BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX) 3149 can_use_msi = 1; 3150 break; 3151 default: 3152 if (BGE_IS_575X_PLUS(sc)) 3153 can_use_msi = 1; 3154 } 3155 return (can_use_msi); 3156 } 3157 3158 /* 3159 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 3160 * against our list and return its name if we find a match. Note 3161 * that since the Broadcom controller contains VPD support, we 3162 * can get the device name string from the controller itself instead 3163 * of the compiled-in string. This is a little slow, but it guarantees 3164 * we'll always announce the right product name. 3165 */ 3166 static int 3167 bge_probe(device_t parent, cfdata_t match, void *aux) 3168 { 3169 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 3170 3171 if (bge_lookup(pa) != NULL) 3172 return 1; 3173 3174 return 0; 3175 } 3176 3177 static void 3178 bge_attach(device_t parent, device_t self, void *aux) 3179 { 3180 struct bge_softc *sc = device_private(self); 3181 struct pci_attach_args *pa = aux; 3182 prop_dictionary_t dict; 3183 const struct bge_product *bp; 3184 const struct bge_revision *br; 3185 pci_chipset_tag_t pc; 3186 const char *intrstr = NULL; 3187 uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5; 3188 uint32_t command; 3189 struct ifnet *ifp; 3190 struct mii_data * const mii = &sc->bge_mii; 3191 uint32_t misccfg, mimode, macmode; 3192 void * kva; 3193 u_char eaddr[ETHER_ADDR_LEN]; 3194 pcireg_t memtype, subid, reg; 3195 bus_addr_t memaddr; 3196 uint32_t pm_ctl; 3197 bool no_seeprom; 3198 int capmask, trys; 3199 int mii_flags; 3200 int map_flags; 3201 char intrbuf[PCI_INTRSTR_LEN]; 3202 3203 bp = bge_lookup(pa); 3204 KASSERT(bp != NULL); 3205 3206 sc->sc_pc = pa->pa_pc; 3207 sc->sc_pcitag = pa->pa_tag; 3208 sc->bge_dev = self; 3209 3210 sc->bge_pa = *pa; 3211 pc = sc->sc_pc; 3212 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 3213 3214 aprint_naive(": Ethernet controller\n"); 3215 aprint_normal(": %s Ethernet\n", bp->bp_name); 3216 3217 /* 3218 * Map control/status registers. 
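 * Memory space and bus mastering are enabled in the PCI command
 * register first; BAR0 is then mapped while deliberately ignoring the
 * prefetchable attribute (see the PR#48451 workaround below).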
3219 */ 3220 DPRINTFN(5, ("Map control/status regs\n")); 3221 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3222 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 3223 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 3224 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3225 3226 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 3227 aprint_error_dev(sc->bge_dev, 3228 "failed to enable memory mapping!\n"); 3229 return; 3230 } 3231 3232 DPRINTFN(5, ("pci_mem_find\n")); 3233 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 3234 switch (memtype) { 3235 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3236 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3237 #if 0 3238 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 3239 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 3240 &memaddr, &sc->bge_bsize) == 0) 3241 break; 3242 #else 3243 /* 3244 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3245 * system get NMI on boot (PR#48451). This problem might not be 3246 * the driver's bug but our PCI common part's bug. Until we 3247 * find a real reason, we ignore the prefetchable bit. 3248 */ 3249 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0, 3250 memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) { 3251 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3252 if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize, 3253 map_flags, &sc->bge_bhandle) == 0) { 3254 sc->bge_btag = pa->pa_memt; 3255 break; 3256 } 3257 } 3258 #endif 3259 /* FALLTHROUGH */ 3260 default: 3261 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 3262 return; 3263 } 3264 3265 /* Save various chip information. */ 3266 sc->bge_chipid = bge_chipid(pa); 3267 sc->bge_phy_addr = bge_phy_addr(sc); 3268 3269 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 3270 &sc->bge_pciecap, NULL) != 0) { 3271 /* PCIe */ 3272 sc->bge_flags |= BGEF_PCIE; 3273 /* Extract supported maximum payload size. */ 3274 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3275 sc->bge_pciecap + PCIE_DCAP); 3276 sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD); 3277 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3278 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 3279 sc->bge_expmrq = 2048; 3280 else 3281 sc->bge_expmrq = 4096; 3282 bge_set_max_readrq(sc); 3283 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) { 3284 /* PCIe without PCIe cap */ 3285 sc->bge_flags |= BGEF_PCIE; 3286 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 3287 BGE_PCISTATE_PCI_BUSMODE) == 0) { 3288 /* PCI-X */ 3289 sc->bge_flags |= BGEF_PCIX; 3290 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, 3291 &sc->bge_pcixcap, NULL) == 0) 3292 aprint_error_dev(sc->bge_dev, 3293 "unable to find PCIX capability\n"); 3294 } 3295 3296 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) { 3297 /* 3298 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 3299 * can clobber the chip's PCI config-space power control 3300 * registers, leaving the card in D3 powersave state. We do 3301 * not have memory-mapped registers in this state, so force 3302 * device into D0 state before starting initialization. 
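 * (The power-management command register write below selects D0 and
 * then waits 1ms for the transition to settle.)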
3303 */ 3304 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 3305 pm_ctl &= ~(PCI_PWR_D0 | PCI_PWR_D1 | PCI_PWR_D2 | PCI_PWR_D3); 3306 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 3307 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 3308 DELAY(1000); /* 27 usec is allegedly sufficent */ 3309 } 3310 3311 /* Save chipset family. */ 3312 switch (BGE_ASICREV(sc->bge_chipid)) { 3313 case BGE_ASICREV_BCM5717: 3314 case BGE_ASICREV_BCM5719: 3315 case BGE_ASICREV_BCM5720: 3316 sc->bge_flags |= BGEF_5717_PLUS; 3317 /* FALLTHROUGH */ 3318 case BGE_ASICREV_BCM5762: 3319 case BGE_ASICREV_BCM57765: 3320 case BGE_ASICREV_BCM57766: 3321 if (!BGE_IS_5717_PLUS(sc)) 3322 sc->bge_flags |= BGEF_57765_FAMILY; 3323 sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS | 3324 BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE; 3325 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3326 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 3327 /* 3328 * Enable work around for DMA engine miscalculation 3329 * of TXMBUF available space. 3330 */ 3331 sc->bge_flags |= BGEF_RDMA_BUG; 3332 3333 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3334 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) { 3335 /* Jumbo frame on BCM5719 A0 does not work. */ 3336 sc->bge_flags &= ~BGEF_JUMBO_CAPABLE; 3337 } 3338 } 3339 break; 3340 case BGE_ASICREV_BCM5755: 3341 case BGE_ASICREV_BCM5761: 3342 case BGE_ASICREV_BCM5784: 3343 case BGE_ASICREV_BCM5785: 3344 case BGE_ASICREV_BCM5787: 3345 case BGE_ASICREV_BCM57780: 3346 sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS; 3347 break; 3348 case BGE_ASICREV_BCM5700: 3349 case BGE_ASICREV_BCM5701: 3350 case BGE_ASICREV_BCM5703: 3351 case BGE_ASICREV_BCM5704: 3352 sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE; 3353 break; 3354 case BGE_ASICREV_BCM5714_A0: 3355 case BGE_ASICREV_BCM5780: 3356 case BGE_ASICREV_BCM5714: 3357 sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE; 3358 /* FALLTHROUGH */ 3359 case BGE_ASICREV_BCM5750: 3360 case BGE_ASICREV_BCM5752: 3361 case BGE_ASICREV_BCM5906: 3362 sc->bge_flags |= BGEF_575X_PLUS; 3363 /* FALLTHROUGH */ 3364 case BGE_ASICREV_BCM5705: 3365 sc->bge_flags |= BGEF_5705_PLUS; 3366 break; 3367 } 3368 3369 /* Identify chips with APE processor. */ 3370 switch (BGE_ASICREV(sc->bge_chipid)) { 3371 case BGE_ASICREV_BCM5717: 3372 case BGE_ASICREV_BCM5719: 3373 case BGE_ASICREV_BCM5720: 3374 case BGE_ASICREV_BCM5761: 3375 case BGE_ASICREV_BCM5762: 3376 sc->bge_flags |= BGEF_APE; 3377 break; 3378 } 3379 3380 /* 3381 * The 40bit DMA bug applies to the 5714/5715 controllers and is 3382 * not actually a MAC controller bug but an issue with the embedded 3383 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 3384 */ 3385 if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0)) 3386 sc->bge_flags |= BGEF_40BIT_BUG; 3387 3388 /* Chips with APE need BAR2 access for APE registers/memory. */ 3389 if ((sc->bge_flags & BGEF_APE) != 0) { 3390 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2); 3391 #if 0 3392 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0, 3393 &sc->bge_apetag, &sc->bge_apehandle, NULL, 3394 &sc->bge_apesize)) { 3395 aprint_error_dev(sc->bge_dev, 3396 "couldn't map BAR2 memory\n"); 3397 return; 3398 } 3399 #else 3400 /* 3401 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3402 * system get NMI on boot (PR#48451). This problem might not be 3403 * the driver's bug but our PCI common part's bug. 
Until we 3404 * find a real reason, we ignore the prefetchable bit. 3405 */ 3406 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2, 3407 memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) { 3408 aprint_error_dev(sc->bge_dev, 3409 "couldn't map BAR2 memory\n"); 3410 return; 3411 } 3412 3413 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3414 if (bus_space_map(pa->pa_memt, memaddr, 3415 sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) { 3416 aprint_error_dev(sc->bge_dev, 3417 "couldn't map BAR2 memory\n"); 3418 return; 3419 } 3420 sc->bge_apetag = pa->pa_memt; 3421 #endif 3422 3423 /* Enable APE register/memory access by host driver. */ 3424 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 3425 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 3426 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 3427 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 3428 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg); 3429 3430 bge_ape_lock_init(sc); 3431 bge_ape_read_fw_ver(sc); 3432 } 3433 3434 /* Identify the chips that use an CPMU. */ 3435 if (BGE_IS_5717_PLUS(sc) || 3436 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3437 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3438 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 3439 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 3440 sc->bge_flags |= BGEF_CPMU_PRESENT; 3441 3442 /* 3443 * When using the BCM5701 in PCI-X mode, data corruption has 3444 * been observed in the first few bytes of some received packets. 3445 * Aligning the packet buffer in memory eliminates the corruption. 3446 * Unfortunately, this misaligns the packet payloads. On platforms 3447 * which do not support unaligned accesses, we will realign the 3448 * payloads by copying the received packets. 3449 */ 3450 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 3451 sc->bge_flags & BGEF_PCIX) 3452 sc->bge_flags |= BGEF_RX_ALIGNBUG; 3453 3454 if (BGE_IS_5700_FAMILY(sc)) 3455 sc->bge_flags |= BGEF_JUMBO_CAPABLE; 3456 3457 misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 3458 misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 3459 3460 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3461 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 3462 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 3463 sc->bge_flags |= BGEF_IS_5788; 3464 3465 /* 3466 * Some controllers seem to require a special firmware to use 3467 * TSO. But the firmware is not available to FreeBSD and Linux 3468 * claims that the TSO performed by the firmware is slower than 3469 * hardware based TSO. Moreover the firmware based TSO has one 3470 * known bug which can't handle TSO if ethernet header + IP/TCP 3471 * header is greater than 80 bytes. The workaround for the TSO 3472 * bug exist but it seems it's too expensive than not using 3473 * TSO at all. Some hardwares also have the TSO bug so limit 3474 * the TSO to the controllers that are not affected TSO issues 3475 * (e.g. 5755 or higher). 3476 */ 3477 if (BGE_IS_5755_PLUS(sc)) { 3478 /* 3479 * BCM5754 and BCM5787 shares the same ASIC id so 3480 * explicit device id check is required. 3481 */ 3482 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 3483 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 3484 sc->bge_flags |= BGEF_TSO; 3485 /* TSO on BCM5719 A0 does not work. 
*/ 3486 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3487 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) 3488 sc->bge_flags &= ~BGEF_TSO; 3489 } 3490 3491 capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */ 3492 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 3493 (misccfg == 0x4000 || misccfg == 0x8000)) || 3494 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3495 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3496 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 3497 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 3498 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 3499 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3500 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 3501 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 3502 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 3503 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 3504 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 3505 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 3506 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3507 /* These chips are 10/100 only. */ 3508 capmask &= ~BMSR_EXTSTAT; 3509 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3510 } 3511 3512 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3513 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3514 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 3515 sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) 3516 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3517 3518 /* Set various PHY bug flags. */ 3519 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 3520 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 3521 sc->bge_phy_flags |= BGEPHYF_CRC_BUG; 3522 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 3523 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 3524 sc->bge_phy_flags |= BGEPHYF_ADC_BUG; 3525 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 3526 sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG; 3527 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3528 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 3529 PCI_VENDOR(subid) == PCI_VENDOR_DELL) 3530 sc->bge_phy_flags |= BGEPHYF_NO_3LED; 3531 if (BGE_IS_5705_PLUS(sc) && 3532 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 3533 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3534 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 3535 !BGE_IS_57765_PLUS(sc)) { 3536 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 3537 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3538 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3539 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 3540 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 3541 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 3542 sc->bge_phy_flags |= BGEPHYF_JITTER_BUG; 3543 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 3544 sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM; 3545 } else 3546 sc->bge_phy_flags |= BGEPHYF_BER_BUG; 3547 } 3548 3549 /* 3550 * SEEPROM check. 3551 * First check if firmware knows we do not have SEEPROM. 
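 * Three things are checked below: a "without-seeprom" device property,
 * the BCM5906 (always assumed to lack one), and the ROM-failed bit in
 * the RX CPU mode register.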
3552 */ 3553 if (prop_dictionary_get_bool(device_properties(self), 3554 "without-seeprom", &no_seeprom) && no_seeprom) 3555 sc->bge_flags |= BGEF_NO_EEPROM; 3556 3557 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 3558 sc->bge_flags |= BGEF_NO_EEPROM; 3559 3560 /* Now check the 'ROM failed' bit on the RX CPU */ 3561 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 3562 sc->bge_flags |= BGEF_NO_EEPROM; 3563 3564 sc->bge_asf_mode = 0; 3565 /* No ASF if APE present. */ 3566 if ((sc->bge_flags & BGEF_APE) == 0) { 3567 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3568 BGE_SRAM_DATA_SIG_MAGIC)) { 3569 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & 3570 BGE_HWCFG_ASF) { 3571 sc->bge_asf_mode |= ASF_ENABLE; 3572 sc->bge_asf_mode |= ASF_STACKUP; 3573 if (BGE_IS_575X_PLUS(sc)) 3574 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 3575 } 3576 } 3577 } 3578 3579 int counts[PCI_INTR_TYPE_SIZE] = { 3580 [PCI_INTR_TYPE_INTX] = 1, 3581 [PCI_INTR_TYPE_MSI] = 1, 3582 [PCI_INTR_TYPE_MSIX] = 1, 3583 }; 3584 int max_type = PCI_INTR_TYPE_MSIX; 3585 3586 if (!bge_can_use_msi(sc)) { 3587 /* MSI broken, allow only INTx */ 3588 max_type = PCI_INTR_TYPE_INTX; 3589 } 3590 3591 if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) { 3592 aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n"); 3593 return; 3594 } 3595 3596 DPRINTFN(5, ("pci_intr_string\n")); 3597 intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf, 3598 sizeof(intrbuf)); 3599 DPRINTFN(5, ("pci_intr_establish\n")); 3600 sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0], 3601 IPL_NET, bge_intr, sc, device_xname(sc->bge_dev)); 3602 if (sc->bge_intrhand == NULL) { 3603 pci_intr_release(pc, sc->bge_pihp, 1); 3604 sc->bge_pihp = NULL; 3605 3606 aprint_error_dev(self, "couldn't establish interrupt"); 3607 if (intrstr != NULL) 3608 aprint_error(" at %s", intrstr); 3609 aprint_error("\n"); 3610 return; 3611 } 3612 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 3613 3614 switch (pci_intr_type(pc, sc->bge_pihp[0])) { 3615 case PCI_INTR_TYPE_MSIX: 3616 case PCI_INTR_TYPE_MSI: 3617 KASSERT(bge_can_use_msi(sc)); 3618 sc->bge_flags |= BGEF_MSI; 3619 break; 3620 default: 3621 /* nothing to do */ 3622 break; 3623 } 3624 3625 /* 3626 * All controllers except BCM5700 supports tagged status but 3627 * we use tagged status only for MSI case on BCM5717. Otherwise 3628 * MSI on BCM5717 does not work. 3629 */ 3630 if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI) 3631 sc->bge_flags |= BGEF_TAGGED_STATUS; 3632 3633 /* 3634 * Reset NVRAM before bge_reset(). It's required to acquire NVRAM 3635 * lock in bge_reset(). 3636 */ 3637 CSR_WRITE_4_FLUSH(sc, BGE_EE_ADDR, 3638 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 3639 delay(1000); 3640 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 3641 3642 bge_stop_fw(sc); 3643 bge_sig_pre_reset(sc, BGE_RESET_START); 3644 if (bge_reset(sc)) 3645 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 3646 3647 /* 3648 * Read the hardware config word in the first 32k of NIC internal 3649 * memory, or fall back to the config word in the EEPROM. 3650 * Note: on some BCM5700 cards, this value appears to be unset. 
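 * The SRAM signature at BGE_SRAM_DATA_SIG is checked first; if the magic
 * value is present the config words are read with bge_readmem_ind(),
 * otherwise (and only when an EEPROM is present) a single word is read
 * from the EEPROM and byte-swapped.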
3651 */ 3652 hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0; 3653 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3654 BGE_SRAM_DATA_SIG_MAGIC) { 3655 uint32_t tmp; 3656 3657 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); 3658 tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >> 3659 BGE_SRAM_DATA_VER_SHIFT; 3660 if ((0 < tmp) && (tmp < 0x100)) 3661 hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2); 3662 if (sc->bge_flags & BGEF_PCIE) 3663 hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3); 3664 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 3665 hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4); 3666 if (BGE_IS_5717_PLUS(sc)) 3667 hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5); 3668 } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) { 3669 bge_read_eeprom(sc, (void *)&hwcfg, 3670 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 3671 hwcfg = be32toh(hwcfg); 3672 } 3673 aprint_normal_dev(sc->bge_dev, 3674 "HW config %08x, %08x, %08x, %08x %08x\n", 3675 hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5); 3676 3677 bge_sig_legacy(sc, BGE_RESET_START); 3678 bge_sig_post_reset(sc, BGE_RESET_START); 3679 3680 if (bge_chipinit(sc)) { 3681 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 3682 bge_release_resources(sc); 3683 return; 3684 } 3685 3686 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 3687 BGE_SETBIT_FLUSH(sc, BGE_MISC_LOCAL_CTL, 3688 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUTEN1); 3689 DELAY(100); 3690 } 3691 3692 /* Set MI_MODE */ 3693 mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3694 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3695 mimode |= BGE_MIMODE_500KHZ_CONST; 3696 else 3697 mimode |= BGE_MIMODE_BASE; 3698 CSR_WRITE_4_FLUSH(sc, BGE_MI_MODE, mimode); 3699 DELAY(80); 3700 3701 /* 3702 * Get station address from the EEPROM. 3703 */ 3704 if (bge_get_eaddr(sc, eaddr)) { 3705 aprint_error_dev(sc->bge_dev, 3706 "failed to read station address\n"); 3707 bge_release_resources(sc); 3708 return; 3709 } 3710 3711 br = bge_lookup_rev(sc->bge_chipid); 3712 3713 if (br == NULL) { 3714 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 3715 sc->bge_chipid); 3716 } else { 3717 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 3718 br->br_name, sc->bge_chipid); 3719 } 3720 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 3721 3722 /* Allocate the general information block and ring buffers. 
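 * A 64-bit DMA tag is preferred when available (restricted to 40 bits on
 * chips with the 5714-family bridge bug), after which the usual bus_dma
 * sequence follows: bus_dmamem_alloc() for the ring segment,
 * bus_dmamem_map() for a kernel virtual address, then bus_dmamap_create()
 * and bus_dmamap_load() so the rings can be handed to the chip;
 * bge_rdata then points at the zeroed ring data.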
*/ 3723 if (pci_dma64_available(pa)) { 3724 sc->bge_dmatag = pa->pa_dmat64; 3725 sc->bge_dmatag32 = pa->pa_dmat; 3726 sc->bge_dma64 = true; 3727 } else { 3728 sc->bge_dmatag = pa->pa_dmat; 3729 sc->bge_dmatag32 = pa->pa_dmat; 3730 sc->bge_dma64 = false; 3731 } 3732 3733 /* 40bit DMA workaround */ 3734 if (sizeof(bus_addr_t) > 4) { 3735 if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) { 3736 bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */ 3737 3738 if (bus_dmatag_subregion(olddmatag, 0, 3739 (bus_addr_t)(1ULL << 40), &(sc->bge_dmatag), 3740 BUS_DMA_NOWAIT) != 0) { 3741 aprint_error_dev(self, 3742 "WARNING: failed to restrict dma range," 3743 " falling back to parent bus dma range\n"); 3744 sc->bge_dmatag = olddmatag; 3745 } 3746 } 3747 } 3748 SLIST_INIT(&sc->txdma_list); 3749 DPRINTFN(5, ("bus_dmamem_alloc\n")); 3750 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3751 PAGE_SIZE, 0, &sc->bge_ring_seg, 1, 3752 &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) { 3753 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 3754 return; 3755 } 3756 DPRINTFN(5, ("bus_dmamem_map\n")); 3757 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3758 sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva, 3759 BUS_DMA_NOWAIT)) { 3760 aprint_error_dev(sc->bge_dev, 3761 "can't map DMA buffers (%zu bytes)\n", 3762 sizeof(struct bge_ring_data)); 3763 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3764 sc->bge_ring_rseg); 3765 return; 3766 } 3767 DPRINTFN(5, ("bus_dmamem_create\n")); 3768 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3769 sizeof(struct bge_ring_data), 0, 3770 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 3771 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 3772 bus_dmamem_unmap(sc->bge_dmatag, kva, 3773 sizeof(struct bge_ring_data)); 3774 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3775 sc->bge_ring_rseg); 3776 return; 3777 } 3778 DPRINTFN(5, ("bus_dmamem_load\n")); 3779 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3780 sizeof(struct bge_ring_data), NULL, 3781 BUS_DMA_NOWAIT)) { 3782 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3783 bus_dmamem_unmap(sc->bge_dmatag, kva, 3784 sizeof(struct bge_ring_data)); 3785 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3786 sc->bge_ring_rseg); 3787 return; 3788 } 3789 3790 DPRINTFN(5, ("bzero\n")); 3791 sc->bge_rdata = (struct bge_ring_data *)kva; 3792 3793 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 3794 3795 /* Try to allocate memory for jumbo buffers. */ 3796 if (BGE_IS_JUMBO_CAPABLE(sc)) { 3797 if (bge_alloc_jumbo_mem(sc)) { 3798 aprint_error_dev(sc->bge_dev, 3799 "jumbo buffer allocation failed\n"); 3800 } else 3801 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3802 } 3803 3804 /* Set default tuneable values. 
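 * (The receive-side interrupt mitigation values can be changed at run
 * time; bge_intr() applies a pending change by rewriting the
 * host-coalescing registers.)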
*/ 3805 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3806 sc->bge_rx_coal_ticks = 150; 3807 sc->bge_rx_max_coal_bds = 64; 3808 sc->bge_tx_coal_ticks = 300; 3809 sc->bge_tx_max_coal_bds = 400; 3810 if (BGE_IS_5705_PLUS(sc)) { 3811 sc->bge_tx_coal_ticks = (12 * 5); 3812 sc->bge_tx_max_coal_bds = (12 * 5); 3813 aprint_verbose_dev(sc->bge_dev, 3814 "setting short Tx thresholds\n"); 3815 } 3816 3817 if (BGE_IS_5717_PLUS(sc)) 3818 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3819 else if (BGE_IS_5705_PLUS(sc)) 3820 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3821 else 3822 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3823 3824 /* Set up ifnet structure */ 3825 ifp = &sc->ethercom.ec_if; 3826 ifp->if_softc = sc; 3827 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3828 ifp->if_ioctl = bge_ioctl; 3829 ifp->if_stop = bge_stop; 3830 ifp->if_start = bge_start; 3831 ifp->if_init = bge_init; 3832 ifp->if_watchdog = bge_watchdog; 3833 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 3834 IFQ_SET_READY(&ifp->if_snd); 3835 DPRINTFN(5, ("strcpy if_xname\n")); 3836 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 3837 3838 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3839 sc->ethercom.ec_if.if_capabilities |= 3840 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 3841 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 3842 sc->ethercom.ec_if.if_capabilities |= 3843 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3844 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 3845 #endif 3846 sc->ethercom.ec_capabilities |= 3847 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 3848 sc->ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 3849 3850 if (sc->bge_flags & BGEF_TSO) 3851 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 3852 3853 /* 3854 * Do MII setup. 3855 */ 3856 DPRINTFN(5, ("mii setup\n")); 3857 mii->mii_ifp = ifp; 3858 mii->mii_readreg = bge_miibus_readreg; 3859 mii->mii_writereg = bge_miibus_writereg; 3860 mii->mii_statchg = bge_miibus_statchg; 3861 3862 /* 3863 * Figure out what sort of media we have by checking the hardware 3864 * config word. Note: on some BCM5700 cards, this value appears to be 3865 * unset. If that's the case, we have to rely on identifying the NIC 3866 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41. 3867 * The SysKonnect SK-9D41 is a 1000baseSX card. 3868 */ 3869 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 || 3870 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3871 if (BGE_IS_5705_PLUS(sc)) { 3872 sc->bge_flags |= BGEF_FIBER_MII; 3873 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3874 } else 3875 sc->bge_flags |= BGEF_FIBER_TBI; 3876 } 3877 3878 /* Set bge_phy_flags before prop_dictionary_set_uint32() */ 3879 if (BGE_IS_JUMBO_CAPABLE(sc)) 3880 sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE; 3881 3882 /* set phyflags and chipid before mii_attach() */ 3883 dict = device_properties(self); 3884 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags); 3885 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid); 3886 3887 macmode = CSR_READ_4(sc, BGE_MAC_MODE); 3888 macmode &= ~BGE_MACMODE_PORTMODE; 3889 /* Initialize ifmedia structures. 
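 * TBI (1000baseSX) adapters get a private ifmedia with 1000_SX,
 * 1000_SX full-duplex and auto entries; everything else is brought up
 * through mii_attach() below, retrying a few times if the ASF firmware
 * still owns the PHY.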
*/ 3890 if (sc->bge_flags & BGEF_FIBER_TBI) { 3891 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, 3892 macmode | BGE_PORTMODE_TBI); 3893 DELAY(40); 3894 3895 sc->ethercom.ec_ifmedia = &sc->bge_ifmedia; 3896 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 3897 bge_ifmedia_sts); 3898 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL); 3899 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX |IFM_FDX, 3900 0, NULL); 3901 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 3902 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 3903 /* Pretend the user requested this setting */ 3904 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 3905 } else { 3906 uint16_t phyreg; 3907 int rv; 3908 /* 3909 * Do transceiver setup and tell the firmware the 3910 * driver is down so we can try to get access the 3911 * probe if ASF is running. Retry a couple of times 3912 * if we get a conflict with the ASF firmware accessing 3913 * the PHY. 3914 */ 3915 if (sc->bge_flags & BGEF_FIBER_MII) 3916 macmode |= BGE_PORTMODE_GMII; 3917 else 3918 macmode |= BGE_PORTMODE_MII; 3919 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, macmode); 3920 DELAY(40); 3921 3922 /* 3923 * Do transceiver setup and tell the firmware the 3924 * driver is down so we can try to get access the 3925 * probe if ASF is running. Retry a couple of times 3926 * if we get a conflict with the ASF firmware accessing 3927 * the PHY. 3928 */ 3929 trys = 0; 3930 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3931 sc->ethercom.ec_mii = mii; 3932 ifmedia_init(&mii->mii_media, 0, bge_ifmedia_upd, 3933 bge_ifmedia_sts); 3934 mii_flags = MIIF_DOPAUSE; 3935 if (sc->bge_flags & BGEF_FIBER_MII) 3936 mii_flags |= MIIF_HAVEFIBER; 3937 again: 3938 bge_asf_driver_up(sc); 3939 rv = bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, 3940 MII_BMCR, &phyreg); 3941 if ((rv != 0) || ((phyreg & BMCR_PDOWN) != 0)) { 3942 int i; 3943 3944 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, 3945 MII_BMCR, BMCR_RESET); 3946 /* Wait up to 500ms for it to complete. */ 3947 for (i = 0; i < 500; i++) { 3948 bge_miibus_readreg(sc->bge_dev, 3949 sc->bge_phy_addr, MII_BMCR, &phyreg); 3950 if ((phyreg & BMCR_RESET) == 0) 3951 break; 3952 DELAY(1000); 3953 } 3954 } 3955 3956 mii_attach(sc->bge_dev, mii, capmask, sc->bge_phy_addr, 3957 MII_OFFSET_ANY, mii_flags); 3958 3959 if (LIST_EMPTY(&mii->mii_phys) && (trys++ < 4)) 3960 goto again; 3961 3962 if (LIST_EMPTY(&mii->mii_phys)) { 3963 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 3964 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 3965 0, NULL); 3966 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 3967 } else 3968 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 3969 3970 /* 3971 * Now tell the firmware we are going up after probing the PHY 3972 */ 3973 if (sc->bge_asf_mode & ASF_STACKUP) 3974 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3975 } 3976 3977 /* 3978 * Call MI attach routine. 3979 */ 3980 DPRINTFN(5, ("if_attach\n")); 3981 if_attach(ifp); 3982 if_deferred_start_init(ifp, NULL); 3983 DPRINTFN(5, ("ether_ifattach\n")); 3984 ether_ifattach(ifp, eaddr); 3985 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb); 3986 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 3987 RND_TYPE_NET, RND_FLAG_DEFAULT); 3988 #ifdef BGE_EVENT_COUNTERS 3989 /* 3990 * Attach event counters. 
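 * They can be inspected with vmstat -e and are detached again in
 * bge_release_resources().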
3991 */ 3992 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3993 NULL, device_xname(sc->bge_dev), "intr"); 3994 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR, 3995 NULL, device_xname(sc->bge_dev), "intr_spurious"); 3996 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR, 3997 NULL, device_xname(sc->bge_dev), "intr_spurious2"); 3998 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3999 NULL, device_xname(sc->bge_dev), "tx_xoff"); 4000 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 4001 NULL, device_xname(sc->bge_dev), "tx_xon"); 4002 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 4003 NULL, device_xname(sc->bge_dev), "rx_xoff"); 4004 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 4005 NULL, device_xname(sc->bge_dev), "rx_xon"); 4006 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 4007 NULL, device_xname(sc->bge_dev), "rx_macctl"); 4008 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 4009 NULL, device_xname(sc->bge_dev), "xoffentered"); 4010 #endif /* BGE_EVENT_COUNTERS */ 4011 DPRINTFN(5, ("callout_init\n")); 4012 callout_init(&sc->bge_timeout, 0); 4013 callout_setfunc(&sc->bge_timeout, bge_tick, sc); 4014 4015 if (pmf_device_register(self, NULL, NULL)) 4016 pmf_class_network_register(self, ifp); 4017 else 4018 aprint_error_dev(self, "couldn't establish power handler\n"); 4019 4020 bge_sysctl_init(sc); 4021 4022 #ifdef BGE_DEBUG 4023 bge_debug_info(sc); 4024 #endif 4025 } 4026 4027 /* 4028 * Stop all chip I/O so that the kernel's probe routines don't 4029 * get confused by errant DMAs when rebooting. 4030 */ 4031 static int 4032 bge_detach(device_t self, int flags __unused) 4033 { 4034 struct bge_softc *sc = device_private(self); 4035 struct ifnet *ifp = &sc->ethercom.ec_if; 4036 int s; 4037 4038 s = splnet(); 4039 /* Stop the interface. Callouts are stopped in it. */ 4040 bge_stop(ifp, 1); 4041 splx(s); 4042 4043 mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); 4044 4045 ether_ifdetach(ifp); 4046 if_detach(ifp); 4047 4048 /* Delete all remaining media. */ 4049 ifmedia_fini(&sc->bge_mii.mii_media); 4050 4051 bge_release_resources(sc); 4052 4053 return 0; 4054 } 4055 4056 static void 4057 bge_release_resources(struct bge_softc *sc) 4058 { 4059 4060 /* Detach sysctl */ 4061 if (sc->bge_log != NULL) 4062 sysctl_teardown(&sc->bge_log); 4063 4064 #ifdef BGE_EVENT_COUNTERS 4065 /* Detach event counters. 
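 * (the mirror of the evcnt_attach_dynamic() calls in the attach path)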
*/ 4066 evcnt_detach(&sc->bge_ev_intr); 4067 evcnt_detach(&sc->bge_ev_intr_spurious); 4068 evcnt_detach(&sc->bge_ev_intr_spurious2); 4069 evcnt_detach(&sc->bge_ev_tx_xoff); 4070 evcnt_detach(&sc->bge_ev_tx_xon); 4071 evcnt_detach(&sc->bge_ev_rx_xoff); 4072 evcnt_detach(&sc->bge_ev_rx_xon); 4073 evcnt_detach(&sc->bge_ev_rx_macctl); 4074 evcnt_detach(&sc->bge_ev_xoffentered); 4075 #endif /* BGE_EVENT_COUNTERS */ 4076 4077 /* Disestablish the interrupt handler */ 4078 if (sc->bge_intrhand != NULL) { 4079 pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand); 4080 pci_intr_release(sc->sc_pc, sc->bge_pihp, 1); 4081 sc->bge_intrhand = NULL; 4082 } 4083 4084 if (sc->bge_dmatag != NULL) { 4085 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); 4086 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 4087 bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata, 4088 sizeof(struct bge_ring_data)); 4089 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 4090 sc->bge_ring_rseg); 4091 } 4092 4093 /* Unmap the device registers */ 4094 if (sc->bge_bsize != 0) { 4095 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); 4096 sc->bge_bsize = 0; 4097 } 4098 4099 /* Unmap the APE registers */ 4100 if (sc->bge_apesize != 0) { 4101 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, 4102 sc->bge_apesize); 4103 sc->bge_apesize = 0; 4104 } 4105 } 4106 4107 static int 4108 bge_reset(struct bge_softc *sc) 4109 { 4110 uint32_t cachesize, command; 4111 uint32_t reset, mac_mode, mac_mode_mask; 4112 pcireg_t devctl, reg; 4113 int i, val; 4114 void (*write_op)(struct bge_softc *, int, int); 4115 4116 /* Make mask for BGE_MAC_MODE register. */ 4117 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 4118 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4119 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 4120 /* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */ 4121 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 4122 4123 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 4124 (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 4125 if (sc->bge_flags & BGEF_PCIE) 4126 write_op = bge_writemem_direct; 4127 else 4128 write_op = bge_writemem_ind; 4129 } else 4130 write_op = bge_writereg_ind; 4131 4132 /* 57XX step 4 */ 4133 /* Acquire the NVM lock */ 4134 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 && 4135 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 && 4136 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) { 4137 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 4138 for (i = 0; i < 8000; i++) { 4139 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & 4140 BGE_NVRAMSWARB_GNT1) 4141 break; 4142 DELAY(20); 4143 } 4144 if (i == 8000) { 4145 printf("%s: NVRAM lock timedout!\n", 4146 device_xname(sc->bge_dev)); 4147 } 4148 } 4149 4150 /* Take APE lock when performing reset. */ 4151 bge_ape_lock(sc, BGE_APE_LOCK_GRC); 4152 4153 /* 57XX step 3 */ 4154 /* Save some important PCI state. */ 4155 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 4156 /* 5718 reset step 3 */ 4157 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4158 4159 /* 5718 reset step 5, 57XX step 5b-5d */ 4160 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4161 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4162 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4163 4164 /* XXX ???: Disable fastboot on controllers that support it. 
*/ 4165 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 4166 BGE_IS_5755_PLUS(sc)) 4167 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 4168 4169 /* 5718 reset step 2, 57XX step 6 */ 4170 /* 4171 * Write the magic number to SRAM at offset 0xB50. 4172 * When firmware finishes its initialization it will 4173 * write ~BGE_MAGIC_NUMBER to the same location. 4174 */ 4175 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 4176 4177 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) { 4178 val = CSR_READ_4(sc, BGE_PCIE_LINKCTL); 4179 val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN) 4180 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS; 4181 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val); 4182 } 4183 4184 /* 5718 reset step 6, 57XX step 7 */ 4185 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 4186 /* 4187 * XXX: from FreeBSD/Linux; no documentation 4188 */ 4189 if (sc->bge_flags & BGEF_PCIE) { 4190 if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) && 4191 !BGE_IS_57765_PLUS(sc) && 4192 (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) == 4193 (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) { 4194 /* PCI Express 1.0 system */ 4195 CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG, 4196 BGE_PHY_PCIE_SCRAM_MODE); 4197 } 4198 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 4199 /* 4200 * Prevent PCI Express link training 4201 * during global reset. 4202 */ 4203 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 4204 reset |= (1 << 29); 4205 } 4206 } 4207 4208 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 4209 i = CSR_READ_4(sc, BGE_VCPU_STATUS); 4210 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 4211 i | BGE_VCPU_STATUS_DRV_RESET); 4212 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 4213 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 4214 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 4215 } 4216 4217 /* 4218 * Set GPHY Power Down Override to leave GPHY 4219 * powered up in D0 uninitialized. 4220 */ 4221 if (BGE_IS_5705_PLUS(sc) && 4222 (sc->bge_flags & BGEF_CPMU_PRESENT) == 0) 4223 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 4224 4225 /* Issue global reset */ 4226 write_op(sc, BGE_MISC_CFG, reset); 4227 4228 /* 5718 reset step 7, 57XX step 8 */ 4229 if (sc->bge_flags & BGEF_PCIE) 4230 delay(100*1000); /* too big */ 4231 else 4232 delay(1000); 4233 4234 if (sc->bge_flags & BGEF_PCIE) { 4235 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 4236 DELAY(500000); 4237 /* XXX: Magic Numbers */ 4238 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4239 BGE_PCI_UNKNOWN0); 4240 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4241 BGE_PCI_UNKNOWN0, 4242 reg | (1 << 15)); 4243 } 4244 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4245 sc->bge_pciecap + PCIE_DCSR); 4246 /* Clear enable no snoop and disable relaxed ordering. */ 4247 devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD | 4248 PCIE_DCSR_ENA_NO_SNOOP); 4249 4250 /* Set PCIE max payload size to 128 for older PCIe devices */ 4251 if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0) 4252 devctl &= ~(0x00e0); 4253 /* Clear device status register. Write 1b to clear */ 4254 devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED 4255 | PCIE_DCSR_NFED | PCIE_DCSR_CED; 4256 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4257 sc->bge_pciecap + PCIE_DCSR, devctl); 4258 bge_set_max_readrq(sc); 4259 } 4260 4261 /* From Linux: dummy read to flush PCI posted writes */ 4262 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4263 4264 /* 4265 * Reset some of the PCI state that got zapped by reset 4266 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be 4267 * set, too. 
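 * BGE_PCI_MISC_CTL is therefore rewritten first, after which PCISTATE,
 * the cache line size and the command register saved before the reset
 * are restored.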
4268 */ 4269 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4270 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4271 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4272 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 4273 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && 4274 (sc->bge_flags & BGEF_PCIX) != 0) 4275 val |= BGE_PCISTATE_RETRY_SAME_DMA; 4276 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4277 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 4278 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 4279 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 4280 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val); 4281 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 4282 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 4283 4284 /* 57xx step 11: disable PCI-X Relaxed Ordering. */ 4285 if (sc->bge_flags & BGEF_PCIX) { 4286 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4287 + PCIX_CMD); 4288 /* Set max memory read byte count to 2K */ 4289 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 4290 reg &= ~PCIX_CMD_BYTECNT_MASK; 4291 reg |= PCIX_CMD_BCNT_2048; 4292 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){ 4293 /* 4294 * For 5704, set max outstanding split transaction 4295 * field to 0 (0 means it supports 1 request) 4296 */ 4297 reg &= ~(PCIX_CMD_SPLTRANS_MASK 4298 | PCIX_CMD_BYTECNT_MASK); 4299 reg |= PCIX_CMD_BCNT_2048; 4300 } 4301 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4302 + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER); 4303 } 4304 4305 /* 5718 reset step 10, 57XX step 12 */ 4306 /* Enable memory arbiter. */ 4307 if (BGE_IS_5714_FAMILY(sc)) { 4308 val = CSR_READ_4(sc, BGE_MARB_MODE); 4309 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 4310 } else 4311 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4312 4313 /* XXX 5721, 5751 and 5752 */ 4314 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) { 4315 /* Step 19: */ 4316 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25); 4317 /* Step 20: */ 4318 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT); 4319 } 4320 4321 /* 5718 reset step 12, 57XX step 15 and 16 */ 4322 /* Fix up byte swapping */ 4323 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 4324 4325 /* 5718 reset step 13, 57XX step 17 */ 4326 /* Poll until the firmware initialization is complete */ 4327 bge_poll_fw(sc); 4328 4329 /* 57XX step 21 */ 4330 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) { 4331 pcireg_t msidata; 4332 4333 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4334 BGE_PCI_MSI_DATA); 4335 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16); 4336 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA, 4337 msidata); 4338 } 4339 4340 /* 57XX step 18 */ 4341 /* Write mac mode. */ 4342 val = CSR_READ_4(sc, BGE_MAC_MODE); 4343 /* Restore mac_mode_mask's bits using mac_mode */ 4344 val = (val & ~mac_mode_mask) | mac_mode; 4345 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 4346 DELAY(40); 4347 4348 bge_ape_unlock(sc, BGE_APE_LOCK_GRC); 4349 4350 /* 4351 * The 5704 in TBI mode apparently needs some special 4352 * adjustment to insure the SERDES drive level is set 4353 * to 1.2V. 
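 * (The low twelve bits of BGE_SERDES_CFG are replaced with 0x880 below.)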
4354 */ 4355 if (sc->bge_flags & BGEF_FIBER_TBI && 4356 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 4357 uint32_t serdescfg; 4358 4359 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 4360 serdescfg = (serdescfg & ~0xFFF) | 0x880; 4361 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 4362 } 4363 4364 if (sc->bge_flags & BGEF_PCIE && 4365 !BGE_IS_57765_PLUS(sc) && 4366 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 4367 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) { 4368 uint32_t v; 4369 4370 /* Enable PCI Express bug fix */ 4371 v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG); 4372 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG, 4373 v | BGE_TLP_DATA_FIFO_PROTECT); 4374 } 4375 4376 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 4377 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, 4378 CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 4379 4380 return 0; 4381 } 4382 4383 /* 4384 * Frame reception handling. This is called if there's a frame 4385 * on the receive return list. 4386 * 4387 * Note: we have to be able to handle two possibilities here: 4388 * 1) the frame is from the jumbo receive ring 4389 * 2) the frame is from the standard receive ring 4390 */ 4391 4392 static void 4393 bge_rxeof(struct bge_softc *sc) 4394 { 4395 struct ifnet *ifp; 4396 uint16_t rx_prod, rx_cons; 4397 int stdcnt = 0, jumbocnt = 0; 4398 bus_dmamap_t dmamap; 4399 bus_addr_t offset, toff; 4400 bus_size_t tlen; 4401 int tosync; 4402 4403 rx_cons = sc->bge_rx_saved_considx; 4404 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 4405 4406 /* Nothing to do */ 4407 if (rx_cons == rx_prod) 4408 return; 4409 4410 ifp = &sc->ethercom.ec_if; 4411 4412 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4413 offsetof(struct bge_ring_data, bge_status_block), 4414 sizeof (struct bge_status_block), 4415 BUS_DMASYNC_POSTREAD); 4416 4417 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 4418 tosync = rx_prod - rx_cons; 4419 4420 if (tosync != 0) 4421 rnd_add_uint32(&sc->rnd_source, tosync); 4422 4423 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 4424 4425 if (tosync < 0) { 4426 tlen = (sc->bge_return_ring_cnt - rx_cons) * 4427 sizeof (struct bge_rx_bd); 4428 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4429 toff, tlen, BUS_DMASYNC_POSTREAD); 4430 tosync = -tosync; 4431 } 4432 4433 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4434 offset, tosync * sizeof (struct bge_rx_bd), 4435 BUS_DMASYNC_POSTREAD); 4436 4437 while (rx_cons != rx_prod) { 4438 struct bge_rx_bd *cur_rx; 4439 uint32_t rxidx; 4440 struct mbuf *m = NULL; 4441 4442 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 4443 4444 rxidx = cur_rx->bge_idx; 4445 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 4446 4447 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 4448 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 4449 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 4450 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 4451 jumbocnt++; 4452 bus_dmamap_sync(sc->bge_dmatag, 4453 sc->bge_cdata.bge_rx_jumbo_map, 4454 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 4455 BGE_JLEN, BUS_DMASYNC_POSTREAD); 4456 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4457 if_statinc(ifp, if_ierrors); 4458 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4459 continue; 4460 } 4461 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 4462 NULL)== ENOBUFS) { 4463 if_statinc(ifp, if_ierrors); 4464 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4465 continue; 4466 } 4467 } else { 4468 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 4469 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 4470 4471 
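/*
 * Standard ring: detach the mbuf and its DMA map from this slot,
 * sync and unload the map, then try to refill the slot with a
 * fresh buffer.  If the descriptor is flagged in error or the
 * refill fails, the old mbuf is recycled into the slot and the
 * frame is counted as an input error.
 */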
sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 4472 stdcnt++; 4473 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 4474 sc->bge_cdata.bge_rx_std_map[rxidx] = NULL; 4475 if (dmamap == NULL) { 4476 if_statinc(ifp, if_ierrors); 4477 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4478 continue; 4479 } 4480 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 4481 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 4482 bus_dmamap_unload(sc->bge_dmatag, dmamap); 4483 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4484 if_statinc(ifp, if_ierrors); 4485 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4486 continue; 4487 } 4488 if (bge_newbuf_std(sc, sc->bge_std, 4489 NULL, dmamap) == ENOBUFS) { 4490 if_statinc(ifp, if_ierrors); 4491 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4492 continue; 4493 } 4494 } 4495 4496 #ifndef __NO_STRICT_ALIGNMENT 4497 /* 4498 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 4499 * the Rx buffer has the layer-2 header unaligned. 4500 * If our CPU requires alignment, re-align by copying. 4501 */ 4502 if (sc->bge_flags & BGEF_RX_ALIGNBUG) { 4503 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 4504 cur_rx->bge_len); 4505 m->m_data += ETHER_ALIGN; 4506 } 4507 #endif 4508 4509 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 4510 m_set_rcvif(m, ifp); 4511 4512 bge_rxcsum(sc, cur_rx, m); 4513 4514 /* 4515 * If we received a packet with a vlan tag, pass it 4516 * to vlan_input() instead of ether_input(). 4517 */ 4518 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 4519 vlan_set_tag(m, cur_rx->bge_vlan_tag); 4520 4521 if_percpuq_enqueue(ifp->if_percpuq, m); 4522 } 4523 4524 sc->bge_rx_saved_considx = rx_cons; 4525 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 4526 if (stdcnt) 4527 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 4528 if (jumbocnt) 4529 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 4530 } 4531 4532 static void 4533 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) 4534 { 4535 4536 if (BGE_IS_57765_PLUS(sc)) { 4537 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 4538 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4539 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4540 if ((cur_rx->bge_error_flag & 4541 BGE_RXERRFLAG_IP_CSUM_NOK) != 0) 4542 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4543 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 4544 m->m_pkthdr.csum_data = 4545 cur_rx->bge_tcp_udp_csum; 4546 m->m_pkthdr.csum_flags |= 4547 (M_CSUM_TCPv4 | M_CSUM_UDPv4 |M_CSUM_DATA); 4548 } 4549 } 4550 } else { 4551 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4552 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4553 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 4554 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4555 /* 4556 * Rx transport checksum-offload may also 4557 * have bugs with packets which, when transmitted, 4558 * were `runts' requiring padding. 
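 * As a precaution the hardware TCP/UDP checksum is therefore only
 * trusted for frames of at least ETHER_MIN_NOPAD bytes.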
4559 */ 4560 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 4561 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 4562 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 4563 m->m_pkthdr.csum_data = 4564 cur_rx->bge_tcp_udp_csum; 4565 m->m_pkthdr.csum_flags |= 4566 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_DATA); 4567 } 4568 } 4569 } 4570 4571 static void 4572 bge_txeof(struct bge_softc *sc) 4573 { 4574 struct bge_tx_bd *cur_tx = NULL; 4575 struct ifnet *ifp; 4576 struct txdmamap_pool_entry *dma; 4577 bus_addr_t offset, toff; 4578 bus_size_t tlen; 4579 int tosync; 4580 struct mbuf *m; 4581 4582 ifp = &sc->ethercom.ec_if; 4583 4584 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4585 offsetof(struct bge_ring_data, bge_status_block), 4586 sizeof (struct bge_status_block), 4587 BUS_DMASYNC_POSTREAD); 4588 4589 offset = offsetof(struct bge_ring_data, bge_tx_ring); 4590 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 4591 sc->bge_tx_saved_considx; 4592 4593 if (tosync != 0) 4594 rnd_add_uint32(&sc->rnd_source, tosync); 4595 4596 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 4597 4598 if (tosync < 0) { 4599 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 4600 sizeof (struct bge_tx_bd); 4601 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4602 toff, tlen, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4603 tosync = -tosync; 4604 } 4605 4606 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4607 offset, tosync * sizeof (struct bge_tx_bd), 4608 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4609 4610 /* 4611 * Go through our tx ring and free mbufs for those 4612 * frames that have been sent. 4613 */ 4614 while (sc->bge_tx_saved_considx != 4615 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 4616 uint32_t idx = 0; 4617 4618 idx = sc->bge_tx_saved_considx; 4619 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 4620 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 4621 if_statinc(ifp, if_opackets); 4622 m = sc->bge_cdata.bge_tx_chain[idx]; 4623 if (m != NULL) { 4624 sc->bge_cdata.bge_tx_chain[idx] = NULL; 4625 dma = sc->txdma[idx]; 4626 if (dma->is_dma32) { 4627 bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32, 4628 0, dma->dmamap32->dm_mapsize, 4629 BUS_DMASYNC_POSTWRITE); 4630 bus_dmamap_unload( 4631 sc->bge_dmatag32, dma->dmamap32); 4632 } else { 4633 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 4634 0, dma->dmamap->dm_mapsize, 4635 BUS_DMASYNC_POSTWRITE); 4636 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 4637 } 4638 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 4639 sc->txdma[idx] = NULL; 4640 4641 m_freem(m); 4642 } 4643 sc->bge_txcnt--; 4644 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 4645 ifp->if_timer = 0; 4646 } 4647 4648 if (cur_tx != NULL) 4649 ifp->if_flags &= ~IFF_OACTIVE; 4650 } 4651 4652 static int 4653 bge_intr(void *xsc) 4654 { 4655 struct bge_softc *sc; 4656 struct ifnet *ifp; 4657 uint32_t pcistate, statusword, statustag; 4658 uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE; 4659 4660 sc = xsc; 4661 ifp = &sc->ethercom.ec_if; 4662 4663 /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */ 4664 if (BGE_IS_5717_PLUS(sc)) 4665 intrmask = 0; 4666 4667 /* It is possible for the interrupt to arrive before 4668 * the status block is updated prior to the interrupt. 4669 * Reading the PCI State register will confirm whether the 4670 * interrupt is ours and will flush the status block. 
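 * With tagged status an unchanged tag marks the interrupt as spurious;
 * otherwise the UPDATED flag in the status word is used, both in
 * combination with the INTR_NOT_ACTIVE state read below (which 5717 and
 * newer chips do not provide).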
4671 */ 4672 pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE); 4673 4674 /* read status word from status block */ 4675 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4676 offsetof(struct bge_ring_data, bge_status_block), 4677 sizeof (struct bge_status_block), 4678 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4679 statusword = sc->bge_rdata->bge_status_block.bge_status; 4680 statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; 4681 4682 if (sc->bge_flags & BGEF_TAGGED_STATUS) { 4683 if (sc->bge_lasttag == statustag && 4684 (~pcistate & intrmask)) { 4685 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious); 4686 return (0); 4687 } 4688 sc->bge_lasttag = statustag; 4689 } else { 4690 if (!(statusword & BGE_STATFLAG_UPDATED) && 4691 !(~pcistate & intrmask)) { 4692 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2); 4693 return (0); 4694 } 4695 statustag = 0; 4696 } 4697 /* Ack interrupt and stop others from occurring. */ 4698 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 4699 BGE_EVCNT_INCR(sc->bge_ev_intr); 4700 4701 /* clear status word */ 4702 sc->bge_rdata->bge_status_block.bge_status = 0; 4703 4704 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4705 offsetof(struct bge_ring_data, bge_status_block), 4706 sizeof (struct bge_status_block), 4707 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4708 4709 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4710 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 4711 BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 4712 bge_link_upd(sc); 4713 4714 if (ifp->if_flags & IFF_RUNNING) { 4715 /* Check RX return ring producer/consumer */ 4716 bge_rxeof(sc); 4717 4718 /* Check TX ring producer/consumer */ 4719 bge_txeof(sc); 4720 } 4721 4722 if (sc->bge_pending_rxintr_change) { 4723 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 4724 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 4725 4726 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 4727 DELAY(10); 4728 (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 4729 4730 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 4731 DELAY(10); 4732 (void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 4733 4734 sc->bge_pending_rxintr_change = 0; 4735 } 4736 bge_handle_events(sc); 4737 4738 /* Re-enable interrupts. */ 4739 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag); 4740 4741 if (ifp->if_flags & IFF_RUNNING) 4742 if_schedule_deferred_start(ifp); 4743 4744 return 1; 4745 } 4746 4747 static void 4748 bge_asf_driver_up(struct bge_softc *sc) 4749 { 4750 if (sc->bge_asf_mode & ASF_STACKUP) { 4751 /* Send ASF heartbeat aprox. every 2s */ 4752 if (sc->bge_asf_count) 4753 sc->bge_asf_count --; 4754 else { 4755 sc->bge_asf_count = 2; 4756 4757 bge_wait_for_event_ack(sc); 4758 4759 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, 4760 BGE_FW_CMD_DRV_ALIVE3); 4761 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); 4762 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 4763 BGE_FW_HB_TIMEOUT_SEC); 4764 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 4765 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | 4766 BGE_RX_CPU_DRV_EVENT); 4767 } 4768 } 4769 } 4770 4771 static void 4772 bge_tick(void *xsc) 4773 { 4774 struct bge_softc *sc = xsc; 4775 struct mii_data *mii = &sc->bge_mii; 4776 int s; 4777 4778 s = splnet(); 4779 4780 if (BGE_IS_5705_PLUS(sc)) 4781 bge_stats_update_regs(sc); 4782 else 4783 bge_stats_update(sc); 4784 4785 if (sc->bge_flags & BGEF_FIBER_TBI) { 4786 /* 4787 * Since in TBI mode auto-polling can't be used we should poll 4788 * link status manually. Here we register pending link event 4789 * and trigger interrupt. 
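 * (BGE_STS_LINK_EVT is set and an interrupt is forced through
 * BGE_MLC_INTR_SET; bge_intr() then notices the pending event and calls
 * bge_link_upd().)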
4790 */ 4791 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 4792 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4793 } else { 4794 /* 4795 * Do not touch PHY if we have link up. This could break 4796 * IPMI/ASF mode or produce extra input errors. 4797 * (extra input errors was reported for bcm5701 & bcm5704). 4798 */ 4799 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 4800 mii_tick(mii); 4801 } 4802 4803 bge_asf_driver_up(sc); 4804 4805 if (!sc->bge_detaching) 4806 callout_schedule(&sc->bge_timeout, hz); 4807 4808 splx(s); 4809 } 4810 4811 static void 4812 bge_stats_update_regs(struct bge_softc *sc) 4813 { 4814 struct ifnet *ifp = &sc->ethercom.ec_if; 4815 4816 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 4817 4818 if_statadd_ref(nsr, if_collisions, 4819 CSR_READ_4(sc, BGE_MAC_STATS + 4820 offsetof(struct bge_mac_stats_regs, etherStatsCollisions))); 4821 4822 /* 4823 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0, 4824 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames 4825 * (silicon bug). There's no reliable workaround so just 4826 * ignore the counter 4827 */ 4828 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 4829 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && 4830 sc->bge_chipid != BGE_CHIPID_BCM5720_A0) { 4831 if_statadd_ref(nsr, if_ierrors, 4832 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS)); 4833 } 4834 if_statadd_ref(nsr, if_ierrors, 4835 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS)); 4836 if_statadd_ref(nsr, if_ierrors, 4837 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS)); 4838 4839 IF_STAT_PUTREF(ifp); 4840 4841 if (sc->bge_flags & BGEF_RDMA_BUG) { 4842 uint32_t val, ucast, mcast, bcast; 4843 4844 ucast = CSR_READ_4(sc, BGE_MAC_STATS + 4845 offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts)); 4846 mcast = CSR_READ_4(sc, BGE_MAC_STATS + 4847 offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts)); 4848 bcast = CSR_READ_4(sc, BGE_MAC_STATS + 4849 offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts)); 4850 4851 /* 4852 * If controller transmitted more than BGE_NUM_RDMA_CHANNELS 4853 * frames, it's safe to disable workaround for DMA engine's 4854 * miscalculation of TXMBUF space. 
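 * (The ASIC-specific TX length bit is cleared in BGE_RDMA_LSO_CRPTEN_CTRL
 * and BGEF_RDMA_BUG is dropped so this is done only once.)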
4855 */ 4856 if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) { 4857 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 4858 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 4859 val &= ~BGE_RDMA_TX_LENGTH_WA_5719; 4860 else 4861 val &= ~BGE_RDMA_TX_LENGTH_WA_5720; 4862 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 4863 sc->bge_flags &= ~BGEF_RDMA_BUG; 4864 } 4865 } 4866 } 4867 4868 static void 4869 bge_stats_update(struct bge_softc *sc) 4870 { 4871 struct ifnet *ifp = &sc->ethercom.ec_if; 4872 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 4873 4874 #define READ_STAT(sc, stats, stat) \ 4875 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 4876 4877 uint64_t collisions = 4878 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 4879 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 4880 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 4881 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)); 4882 4883 if_statadd(ifp, if_collisions, collisions - sc->bge_if_collisions); 4884 sc->bge_if_collisions = collisions; 4885 4886 4887 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 4888 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 4889 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 4890 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 4891 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 4892 READ_STAT(sc, stats, 4893 xoffPauseFramesReceived.bge_addr_lo)); 4894 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 4895 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 4896 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 4897 READ_STAT(sc, stats, 4898 macControlFramesReceived.bge_addr_lo)); 4899 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 4900 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 4901 4902 #undef READ_STAT 4903 4904 #ifdef notdef 4905 ifp->if_collisions += 4906 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 4907 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 4908 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 4909 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 4910 ifp->if_collisions; 4911 #endif 4912 } 4913 4914 /* 4915 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 4916 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 4917 * but when such padded frames employ the bge IP/TCP checksum offload, 4918 * the hardware checksum assist gives incorrect results (possibly 4919 * from incorporating its own padding into the UDP/TCP checksum; who knows). 4920 * If we pad such runts with zeros, the onboard checksum comes out correct. 4921 */ 4922 static inline int 4923 bge_cksum_pad(struct mbuf *pkt) 4924 { 4925 struct mbuf *last = NULL; 4926 int padlen; 4927 4928 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 4929 4930 /* if there's only the packet-header and we can pad there, use it. */ 4931 if (pkt->m_pkthdr.len == pkt->m_len && 4932 M_TRAILINGSPACE(pkt) >= padlen) { 4933 last = pkt; 4934 } else { 4935 /* 4936 * Walk packet chain to find last mbuf. We will either 4937 * pad there, or append a new mbuf and pad it 4938 * (thus perhaps avoiding the bcm5700 dma-min bug). 4939 */ 4940 for (last = pkt; last->m_next != NULL; last = last->m_next) { 4941 continue; /* do nothing */ 4942 } 4943 4944 /* `last' now points to last in chain. */ 4945 if (M_TRAILINGSPACE(last) < padlen) { 4946 /* Allocate new empty mbuf, pad it. Compact later. 
*/ 4947 struct mbuf *n; 4948 MGET(n, M_DONTWAIT, MT_DATA); 4949 if (n == NULL) 4950 return ENOBUFS; 4951 n->m_len = 0; 4952 last->m_next = n; 4953 last = n; 4954 } 4955 } 4956 4957 KDASSERT(!M_READONLY(last)); 4958 KDASSERT(M_TRAILINGSPACE(last) >= padlen); 4959 4960 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 4961 memset(mtod(last, char *) + last->m_len, 0, padlen); 4962 last->m_len += padlen; 4963 pkt->m_pkthdr.len += padlen; 4964 return 0; 4965 } 4966 4967 /* 4968 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 4969 */ 4970 static inline int 4971 bge_compact_dma_runt(struct mbuf *pkt) 4972 { 4973 struct mbuf *m, *prev; 4974 int totlen; 4975 4976 prev = NULL; 4977 totlen = 0; 4978 4979 for (m = pkt; m != NULL; prev = m, m = m->m_next) { 4980 int mlen = m->m_len; 4981 int shortfall = 8 - mlen ; 4982 4983 totlen += mlen; 4984 if (mlen == 0) 4985 continue; 4986 if (mlen >= 8) 4987 continue; 4988 4989 /* If we get here, mbuf data is too small for DMA engine. 4990 * Try to fix by shuffling data to prev or next in chain. 4991 * If that fails, do a compacting deep-copy of the whole chain. 4992 */ 4993 4994 /* Internal frag. If fits in prev, copy it there. */ 4995 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 4996 memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 4997 prev->m_len += mlen; 4998 m->m_len = 0; 4999 /* XXX stitch chain */ 5000 prev->m_next = m_free(m); 5001 m = prev; 5002 continue; 5003 } else if (m->m_next != NULL && 5004 M_TRAILINGSPACE(m) >= shortfall && 5005 m->m_next->m_len >= (8 + shortfall)) { 5006 /* m is writable and have enough data in next, pull up. */ 5007 5008 memcpy(m->m_data + m->m_len, m->m_next->m_data, 5009 shortfall); 5010 m->m_len += shortfall; 5011 m->m_next->m_len -= shortfall; 5012 m->m_next->m_data += shortfall; 5013 } else if (m->m_next == NULL || 1) { 5014 /* Got a runt at the very end of the packet. 5015 * borrow data from the tail of the preceding mbuf and 5016 * update its length in-place. (The original data is 5017 * still valid, so we can do this even if prev is not 5018 * writable.) 5019 */ 5020 5021 /* 5022 * If we'd make prev a runt, just move all of its data. 
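 * (That is, if stealing `shortfall' bytes would leave prev with fewer
 * than eight bytes, take all of prev's data instead.)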
5023 */ 5024 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 5025 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 5026 5027 if ((prev->m_len - shortfall) < 8) 5028 shortfall = prev->m_len; 5029 5030 #ifdef notyet /* just do the safe slow thing for now */ 5031 if (!M_READONLY(m)) { 5032 if (M_LEADINGSPACE(m) < shorfall) { 5033 void *m_dat; 5034 m_dat = M_BUFADDR(m); 5035 memmove(m_dat, mtod(m, void*), 5036 m->m_len); 5037 m->m_data = m_dat; 5038 } 5039 } else 5040 #endif /* just do the safe slow thing */ 5041 { 5042 struct mbuf * n = NULL; 5043 int newprevlen = prev->m_len - shortfall; 5044 5045 MGET(n, M_NOWAIT, MT_DATA); 5046 if (n == NULL) 5047 return ENOBUFS; 5048 KASSERT(m->m_len + shortfall < MLEN 5049 /*, 5050 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 5051 5052 /* first copy the data we're stealing from prev */ 5053 memcpy(n->m_data, prev->m_data + newprevlen, 5054 shortfall); 5055 5056 /* update prev->m_len accordingly */ 5057 prev->m_len -= shortfall; 5058 5059 /* copy data from runt m */ 5060 memcpy(n->m_data + shortfall, m->m_data, 5061 m->m_len); 5062 5063 /* n holds what we stole from prev, plus m */ 5064 n->m_len = shortfall + m->m_len; 5065 5066 /* stitch n into chain and free m */ 5067 n->m_next = m->m_next; 5068 prev->m_next = n; 5069 /* KASSERT(m->m_next == NULL); */ 5070 m->m_next = NULL; 5071 m_free(m); 5072 m = n; /* for continuing loop */ 5073 } 5074 } 5075 } 5076 return 0; 5077 } 5078 5079 /* 5080 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 5081 * pointers to descriptors. 5082 */ 5083 static int 5084 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx) 5085 { 5086 struct ifnet *ifp = &sc->ethercom.ec_if; 5087 struct bge_tx_bd *f, *prev_f; 5088 uint32_t frag, cur; 5089 uint16_t csum_flags = 0; 5090 uint16_t txbd_tso_flags = 0; 5091 struct txdmamap_pool_entry *dma; 5092 bus_dmamap_t dmamap; 5093 bus_dma_tag_t dmatag; 5094 int i = 0; 5095 int use_tso, maxsegsize, error; 5096 bool have_vtag; 5097 uint16_t vtag; 5098 bool remap; 5099 5100 if (m_head->m_pkthdr.csum_flags) { 5101 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 5102 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 5103 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4 |M_CSUM_UDPv4)) 5104 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 5105 } 5106 5107 /* 5108 * If we were asked to do an outboard checksum, and the NIC 5109 * has the bug where it sometimes adds in the Ethernet padding, 5110 * explicitly pad with zeros so the cksum will be correct either way. 5111 * (For now, do this for all chip versions, until newer 5112 * are confirmed to not require the workaround.) 5113 */ 5114 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 5115 #ifdef notyet 5116 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 5117 #endif 5118 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 5119 goto check_dma_bug; 5120 5121 if (bge_cksum_pad(m_head) != 0) 5122 return ENOBUFS; 5123 5124 check_dma_bug: 5125 if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)) 5126 goto doit; 5127 5128 /* 5129 * bcm5700 Revision B silicon cannot handle DMA descriptors with 5130 * less than eight bytes. If we encounter a teeny mbuf 5131 * at the end of a chain, we can pad. Otherwise, copy. 
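 * bge_compact_dma_runt() below shuffles data between neighbouring mbufs,
 * or copies into a fresh mbuf, to get rid of such runts.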
5132 */ 5133 if (bge_compact_dma_runt(m_head) != 0) 5134 return ENOBUFS; 5135 5136 doit: 5137 dma = SLIST_FIRST(&sc->txdma_list); 5138 if (dma == NULL) { 5139 ifp->if_flags |= IFF_OACTIVE; 5140 return ENOBUFS; 5141 } 5142 dmamap = dma->dmamap; 5143 dmatag = sc->bge_dmatag; 5144 dma->is_dma32 = false; 5145 5146 /* 5147 * Set up any necessary TSO state before we start packing... 5148 */ 5149 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 5150 if (!use_tso) { 5151 maxsegsize = 0; 5152 } else { /* TSO setup */ 5153 unsigned mss; 5154 struct ether_header *eh; 5155 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 5156 unsigned bge_hlen; 5157 struct mbuf * m0 = m_head; 5158 struct ip *ip; 5159 struct tcphdr *th; 5160 int iphl, hlen; 5161 5162 /* 5163 * XXX It would be nice if the mbuf pkthdr had offset 5164 * fields for the protocol headers. 5165 */ 5166 5167 eh = mtod(m0, struct ether_header *); 5168 switch (htons(eh->ether_type)) { 5169 case ETHERTYPE_IP: 5170 offset = ETHER_HDR_LEN; 5171 break; 5172 5173 case ETHERTYPE_VLAN: 5174 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 5175 break; 5176 5177 default: 5178 /* 5179 * Don't support this protocol or encapsulation. 5180 */ 5181 return ENOBUFS; 5182 } 5183 5184 /* 5185 * TCP/IP headers are in the first mbuf; we can do 5186 * this the easy way. 5187 */ 5188 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 5189 hlen = iphl + offset; 5190 if (__predict_false(m0->m_len < 5191 (hlen + sizeof(struct tcphdr)))) { 5192 5193 aprint_error_dev(sc->bge_dev, 5194 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 5195 "not handled yet\n", 5196 m0->m_len, hlen+ sizeof(struct tcphdr)); 5197 #ifdef NOTYET 5198 /* 5199 * XXX jonathan@NetBSD.org: untested. 5200 * how to force this branch to be taken? 5201 */ 5202 BGE_EVCNT_INCR(sc->bge_ev_txtsopain); 5203 5204 m_copydata(m0, offset, sizeof(ip), &ip); 5205 m_copydata(m0, hlen, sizeof(th), &th); 5206 5207 ip.ip_len = 0; 5208 5209 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 5210 sizeof(ip.ip_len), &ip.ip_len); 5211 5212 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 5213 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 5214 5215 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 5216 sizeof(th.th_sum), &th.th_sum); 5217 5218 hlen += th.th_off << 2; 5219 iptcp_opt_words = hlen; 5220 #else 5221 /* 5222 * if_wm "hard" case not yet supported, can we not 5223 * mandate it out of existence? 5224 */ 5225 (void) ip; (void)th; (void) ip_tcp_hlen; 5226 5227 return ENOBUFS; 5228 #endif 5229 } else { 5230 ip = (struct ip *) (mtod(m0, char *) + offset); 5231 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 5232 ip_tcp_hlen = iphl + (th->th_off << 2); 5233 5234 /* Total IP/TCP options, in 32-bit words */ 5235 iptcp_opt_words = (ip_tcp_hlen 5236 - sizeof(struct tcphdr) 5237 - sizeof(struct ip)) >> 2; 5238 } 5239 if (BGE_IS_575X_PLUS(sc)) { 5240 th->th_sum = 0; 5241 csum_flags = 0; 5242 } else { 5243 /* 5244 * XXX jonathan@NetBSD.org: 5705 untested. 5245 * Requires TSO firmware patch for 5701/5703/5704. 5246 */ 5247 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 5248 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 5249 } 5250 5251 mss = m_head->m_pkthdr.segsz; 5252 txbd_tso_flags |= 5253 BGE_TXBDFLAG_CPU_PRE_DMA | 5254 BGE_TXBDFLAG_CPU_POST_DMA; 5255 5256 /* 5257 * Our NIC TSO-assist assumes TSO has standard, optionless 5258 * IPv4 and TCP headers, which total 40 bytes. 
By default, 5259 * the NIC copies 40 bytes of IP/TCP header from the 5260 * supplied header into the IP/TCP header portion of 5261 * each post-TSO-segment. If the supplied packet has IP or 5262 * TCP options, we need to tell the NIC to copy those extra 5263 * bytes into each post-TSO header, in addition to the normal 5264 * 40-byte IP/TCP header (and to leave space accordingly). 5265 * Unfortunately, the tx-descriptor encoding of the option length 5266 * varies across different ASIC families. 5267 */ 5268 tcp_seg_flags = 0; 5269 bge_hlen = ip_tcp_hlen >> 2; 5270 if (BGE_IS_5717_PLUS(sc)) { 5271 tcp_seg_flags = (bge_hlen & 0x3) << 14; 5272 txbd_tso_flags |= 5273 ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2); 5274 } else if (BGE_IS_5705_PLUS(sc)) { 5275 tcp_seg_flags = bge_hlen << 11; 5276 } else { 5277 /* XXX iptcp_opt_words or bge_hlen ? */ 5278 txbd_tso_flags |= iptcp_opt_words << 12; 5279 } 5280 maxsegsize = mss | tcp_seg_flags; 5281 ip->ip_len = htons(mss + ip_tcp_hlen); 5282 ip->ip_sum = 0; 5283 5284 } /* TSO setup */ 5285 5286 have_vtag = vlan_has_tag(m_head); 5287 if (have_vtag) 5288 vtag = vlan_get_tag(m_head); 5289 5290 /* 5291 * Start packing the mbufs in this chain into 5292 * the fragment pointers. Stop when we run out 5293 * of fragments or hit the end of the mbuf chain. 5294 */ 5295 remap = true; 5296 load_again: 5297 error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT); 5298 if (__predict_false(error)) { 5299 if (error == EFBIG && remap) { 5300 struct mbuf *m; 5301 remap = false; 5302 m = m_defrag(m_head, M_NOWAIT); 5303 if (m != NULL) { 5304 KASSERT(m == m_head); 5305 goto load_again; 5306 } 5307 } 5308 return error; 5309 } 5310 /* 5311 * Sanity check: avoid coming within 16 descriptors 5312 * of the end of the ring. 5313 */ 5314 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 5315 BGE_TSO_PRINTF(("%s: " 5316 " dmamap_load_mbuf too close to ring wrap\n", 5317 device_xname(sc->bge_dev))); 5318 goto fail_unload; 5319 } 5320 5321 /* Iterate over DMA-map fragments. */ 5322 f = prev_f = NULL; 5323 cur = frag = *txidx; 5324 5325 for (i = 0; i < dmamap->dm_nsegs; i++) { 5326 f = &sc->bge_rdata->bge_tx_ring[frag]; 5327 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 5328 break; 5329 5330 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); 5331 f->bge_len = dmamap->dm_segs[i].ds_len; 5332 if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && ( 5333 (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) != 5334 ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) || 5335 (prev_f != NULL && 5336 prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi)) 5337 ) { 5338 /* 5339 * A watchdog timeout issue was observed with TSO; 5340 * limiting the DMA address space to 32 bits seems to 5341 * address the issue. 5342 */ 5343 bus_dmamap_unload(dmatag, dmamap); 5344 dmatag = sc->bge_dmatag32; 5345 dmamap = dma->dmamap32; 5346 dma->is_dma32 = true; 5347 remap = true; 5348 goto load_again; 5349 } 5350 5351 /* 5352 * For 5751 and follow-ons, for TSO we must turn 5353 * off the checksum-assist flag in the tx-descr, and 5354 * supply the ASIC-revision-specific encoding 5355 * of TSO flags and segsize.
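 * (As a rough worked example of the encoding computed above, assuming
 * an optionless header so that ip_tcp_hlen == 40 and bge_hlen == 10:
 * on 5717-plus parts this yields tcp_seg_flags == (10 & 0x3) << 14 ==
 * 0x8000 and txbd_tso_flags |= ((10 & 0xF8) << 7) | ((10 & 0x4) << 2)
 * == 0x400, while on 5705-plus parts it is simply tcp_seg_flags ==
 * 10 << 11 == 0x5000.)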
5356 */ 5357 if (use_tso) { 5358 if (BGE_IS_575X_PLUS(sc) || i == 0) { 5359 f->bge_rsvd = maxsegsize; 5360 f->bge_flags = csum_flags | txbd_tso_flags; 5361 } else { 5362 f->bge_rsvd = 0; 5363 f->bge_flags = 5364 (csum_flags | txbd_tso_flags) & 0x0fff; 5365 } 5366 } else { 5367 f->bge_rsvd = 0; 5368 f->bge_flags = csum_flags; 5369 } 5370 5371 if (have_vtag) { 5372 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 5373 f->bge_vlan_tag = vtag; 5374 } else { 5375 f->bge_vlan_tag = 0; 5376 } 5377 prev_f = f; 5378 cur = frag; 5379 BGE_INC(frag, BGE_TX_RING_CNT); 5380 } 5381 5382 if (i < dmamap->dm_nsegs) { 5383 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 5384 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 5385 goto fail_unload; 5386 } 5387 5388 bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize, 5389 BUS_DMASYNC_PREWRITE); 5390 5391 if (frag == sc->bge_tx_saved_considx) { 5392 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 5393 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 5394 5395 goto fail_unload; 5396 } 5397 5398 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 5399 sc->bge_cdata.bge_tx_chain[cur] = m_head; 5400 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 5401 sc->txdma[cur] = dma; 5402 sc->bge_txcnt += dmamap->dm_nsegs; 5403 5404 *txidx = frag; 5405 5406 return 0; 5407 5408 fail_unload: 5409 bus_dmamap_unload(dmatag, dmamap); 5410 ifp->if_flags |= IFF_OACTIVE; 5411 5412 return ENOBUFS; 5413 } 5414 5415 /* 5416 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 5417 * to the mbuf data regions directly in the transmit descriptors. 5418 */ 5419 static void 5420 bge_start(struct ifnet *ifp) 5421 { 5422 struct bge_softc *sc; 5423 struct mbuf *m_head = NULL; 5424 struct mbuf *m; 5425 uint32_t prodidx; 5426 int pkts = 0; 5427 int error; 5428 5429 sc = ifp->if_softc; 5430 5431 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 5432 return; 5433 5434 prodidx = sc->bge_tx_prodidx; 5435 5436 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 5437 IFQ_POLL(&ifp->if_snd, m_head); 5438 if (m_head == NULL) 5439 break; 5440 5441 #if 0 5442 /* 5443 * XXX 5444 * safety overkill. If this is a fragmented packet chain 5445 * with delayed TCP/UDP checksums, then only encapsulate 5446 * it if we have enough descriptors to handle the entire 5447 * chain at once. 5448 * (paranoia -- may not actually be needed) 5449 */ 5450 if (m_head->m_flags & M_FIRSTFRAG && 5451 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 5452 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 5453 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 5454 ifp->if_flags |= IFF_OACTIVE; 5455 break; 5456 } 5457 } 5458 #endif 5459 5460 /* 5461 * Pack the data into the transmit ring. If we 5462 * don't have room, set the OACTIVE flag and wait 5463 * for the NIC to drain the ring. 5464 */ 5465 error = bge_encap(sc, m_head, &prodidx); 5466 if (__predict_false(error)) { 5467 if (ifp->if_flags & IFF_OACTIVE) { 5468 /* just wait for the transmit ring to drain */ 5469 break; 5470 } 5471 IFQ_DEQUEUE(&ifp->if_snd, m); 5472 KASSERT(m == m_head); 5473 m_freem(m_head); 5474 continue; 5475 } 5476 5477 /* now we are committed to transmit the packet */ 5478 IFQ_DEQUEUE(&ifp->if_snd, m); 5479 KASSERT(m == m_head); 5480 pkts++; 5481 5482 /* 5483 * If there's a BPF listener, bounce a copy of this frame 5484 * to him. 
5485 */ 5486 bpf_mtap(ifp, m_head, BPF_D_OUT); 5487 } 5488 if (pkts == 0) 5489 return; 5490 5491 /* Transmit */ 5492 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5493 /* 5700 b2 errata */ 5494 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 5495 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5496 5497 sc->bge_tx_prodidx = prodidx; 5498 5499 /* 5500 * Set a timeout in case the chip goes out to lunch. 5501 */ 5502 ifp->if_timer = 5; 5503 } 5504 5505 static int 5506 bge_init(struct ifnet *ifp) 5507 { 5508 struct bge_softc *sc = ifp->if_softc; 5509 const uint16_t *m; 5510 uint32_t mode, reg; 5511 int s, error = 0; 5512 5513 s = splnet(); 5514 5515 ifp = &sc->ethercom.ec_if; 5516 5517 /* Cancel pending I/O and flush buffers. */ 5518 bge_stop(ifp, 0); 5519 5520 bge_stop_fw(sc); 5521 bge_sig_pre_reset(sc, BGE_RESET_START); 5522 bge_reset(sc); 5523 bge_sig_legacy(sc, BGE_RESET_START); 5524 5525 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 5526 reg = CSR_READ_4(sc, BGE_CPMU_CTRL); 5527 reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE | 5528 BGE_CPMU_CTRL_LINK_IDLE_MODE); 5529 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); 5530 5531 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 5532 reg &= ~BGE_CPMU_LSPD_10MB_CLK; 5533 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 5534 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); 5535 5536 reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD); 5537 reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK; 5538 reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25; 5539 CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg); 5540 5541 reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC); 5542 reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK; 5543 reg |= BGE_CPMU_HST_ACC_MACCLK_6_25; 5544 CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg); 5545 } 5546 5547 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) { 5548 pcireg_t aercap; 5549 5550 reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH); 5551 reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK) 5552 | BGE_PCIE_PWRMNG_L1THRESH_4MS 5553 | BGE_PCIE_PWRMNG_EXTASPMTMR_EN; 5554 CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg); 5555 5556 reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY); 5557 reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK) 5558 | BGE_PCIE_EIDLE_DELAY_13CLK; 5559 CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg); 5560 5561 /* Clear correctable error */ 5562 if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag, 5563 PCI_EXTCAP_AER, &aercap, NULL) != 0) 5564 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 5565 aercap + PCI_AER_COR_STATUS, 0xffffffff); 5566 5567 reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL); 5568 reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN) 5569 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS; 5570 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg); 5571 } 5572 5573 bge_sig_post_reset(sc, BGE_RESET_START); 5574 5575 bge_chipinit(sc); 5576 5577 /* 5578 * Init the various state machines, ring 5579 * control blocks and firmware. 5580 */ 5581 error = bge_blockinit(sc); 5582 if (error != 0) { 5583 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 5584 error); 5585 splx(s); 5586 return error; 5587 } 5588 5589 ifp = &sc->ethercom.ec_if; 5590 5591 /* 5718 step 25, 57XX step 54 */ 5592 /* Specify MTU. */ 5593 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 5594 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 5595 5596 /* 5718 step 23 */ 5597 /* Load our MAC address. */ 5598 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 5599 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 5600 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, 5601 ((uint32_t)htons(m[1]) << 16) | htons(m[2])); 5602 5603 /* Enable or disable promiscuous mode as needed. 
*/ 5604 if (ifp->if_flags & IFF_PROMISC) 5605 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5606 else 5607 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5608 5609 /* Program multicast filter. */ 5610 bge_setmulti(sc); 5611 5612 /* Init RX ring. */ 5613 bge_init_rx_ring_std(sc); 5614 5615 /* 5616 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 5617 * memory to ensure that the chip has in fact read the first 5618 * entry of the ring. 5619 */ 5620 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 5621 uint32_t v, i; 5622 for (i = 0; i < 10; i++) { 5623 DELAY(20); 5624 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 5625 if (v == (MCLBYTES - ETHER_ALIGN)) 5626 break; 5627 } 5628 if (i == 10) 5629 aprint_error_dev(sc->bge_dev, 5630 "5705 A0 chip failed to load RX ring\n"); 5631 } 5632 5633 /* Init jumbo RX ring. */ 5634 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 5635 bge_init_rx_ring_jumbo(sc); 5636 5637 /* Init our RX return ring index */ 5638 sc->bge_rx_saved_considx = 0; 5639 5640 /* Init TX ring. */ 5641 bge_init_tx_ring(sc); 5642 5643 /* 5718 step 63, 57XX step 94 */ 5644 /* Enable TX MAC state machine lockup fix. */ 5645 mode = CSR_READ_4(sc, BGE_TX_MODE); 5646 if (BGE_IS_5755_PLUS(sc) || 5647 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 5648 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 5649 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 5650 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 5651 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 5652 mode |= CSR_READ_4(sc, BGE_TX_MODE) & 5653 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 5654 } 5655 5656 /* Turn on transmitter */ 5657 CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 5658 /* 5718 step 64 */ 5659 DELAY(100); 5660 5661 /* 5718 step 65, 57XX step 95 */ 5662 /* Turn on receiver */ 5663 mode = CSR_READ_4(sc, BGE_RX_MODE); 5664 if (BGE_IS_5755_PLUS(sc)) 5665 mode |= BGE_RXMODE_IPV6_ENABLE; 5666 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 5667 mode |= BGE_RXMODE_IPV4_FRAG_FIX; 5668 CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); 5669 /* 5718 step 66 */ 5670 DELAY(10); 5671 5672 /* 5718 step 12, 57XX step 37 */ 5673 /* 5674 * XXX Documents for the 5718 series and 577xx say the recommended value 5675 * is 1, but tg3 sets 1 only on the 57765 series. 5676 */ 5677 if (BGE_IS_57765_PLUS(sc)) 5678 reg = 1; 5679 else 5680 reg = 2; 5681 CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg); 5682 5683 /* Tell firmware we're alive. */ 5684 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 5685 5686 /* Enable host interrupts. */ 5687 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 5688 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 5689 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0); 5690 5691 if ((error = bge_ifmedia_upd(ifp)) != 0) 5692 goto out; 5693 5694 ifp->if_flags |= IFF_RUNNING; 5695 ifp->if_flags &= ~IFF_OACTIVE; 5696 5697 callout_schedule(&sc->bge_timeout, hz); 5698 5699 out: 5700 sc->bge_if_flags = ifp->if_flags; 5701 splx(s); 5702 5703 return error; 5704 } 5705 5706 /* 5707 * Set media options. 5708 */ 5709 static int 5710 bge_ifmedia_upd(struct ifnet *ifp) 5711 { 5712 struct bge_softc *sc = ifp->if_softc; 5713 struct mii_data *mii = &sc->bge_mii; 5714 struct ifmedia *ifm = &sc->bge_ifmedia; 5715 int rc; 5716 5717 /* If this is a 1000BASE-X NIC, enable the TBI port.
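 * (TBI boards are fiber parts with no copper PHY attached; for those,
 * media selection is handled directly through sc->bge_ifmedia in the
 * block below and we return without going through the MII layer.)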
*/ 5718 if (sc->bge_flags & BGEF_FIBER_TBI) { 5719 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 5720 return EINVAL; 5721 switch (IFM_SUBTYPE(ifm->ifm_media)) { 5722 case IFM_AUTO: 5723 /* 5724 * The BCM5704 ASIC appears to have a special 5725 * mechanism for programming the autoneg 5726 * advertisement registers in TBI mode. 5727 */ 5728 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 5729 uint32_t sgdig; 5730 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 5731 if (sgdig & BGE_SGDIGSTS_DONE) { 5732 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 5733 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 5734 sgdig |= BGE_SGDIGCFG_AUTO | 5735 BGE_SGDIGCFG_PAUSE_CAP | 5736 BGE_SGDIGCFG_ASYM_PAUSE; 5737 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, 5738 sgdig | BGE_SGDIGCFG_SEND); 5739 DELAY(5); 5740 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, 5741 sgdig); 5742 } 5743 } 5744 break; 5745 case IFM_1000_SX: 5746 if ((ifm->ifm_media & IFM_FDX) != 0) { 5747 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, 5748 BGE_MACMODE_HALF_DUPLEX); 5749 } else { 5750 BGE_SETBIT_FLUSH(sc, BGE_MAC_MODE, 5751 BGE_MACMODE_HALF_DUPLEX); 5752 } 5753 DELAY(40); 5754 break; 5755 default: 5756 return EINVAL; 5757 } 5758 /* XXX 802.3x flow control for 1000BASE-SX */ 5759 return 0; 5760 } 5761 5762 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) && 5763 (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) { 5764 uint32_t reg; 5765 5766 reg = CSR_READ_4(sc, BGE_CPMU_CTRL); 5767 if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) { 5768 reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY; 5769 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); 5770 } 5771 } 5772 5773 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 5774 if ((rc = mii_mediachg(mii)) == ENXIO) 5775 return 0; 5776 5777 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 5778 uint32_t reg; 5779 5780 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK); 5781 if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK) 5782 == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) { 5783 reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK; 5784 delay(40); 5785 CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg); 5786 } 5787 } 5788 5789 /* 5790 * Force an interrupt so that we will call bge_link_upd 5791 * if needed and clear any pending link state attention. 5792 * Without this we are not getting any further interrupts 5793 * for link state changes and thus will not UP the link and 5794 * not be able to send in bge_start. The only way to get 5795 * things working was to receive a packet and get a RX intr. 5796 */ 5797 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 5798 sc->bge_flags & BGEF_IS_5788) 5799 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 5800 else 5801 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 5802 5803 return rc; 5804 } 5805 5806 /* 5807 * Report current media status. 
5808 */ 5809 static void 5810 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 5811 { 5812 struct bge_softc *sc = ifp->if_softc; 5813 struct mii_data *mii = &sc->bge_mii; 5814 5815 if (sc->bge_flags & BGEF_FIBER_TBI) { 5816 ifmr->ifm_status = IFM_AVALID; 5817 ifmr->ifm_active = IFM_ETHER; 5818 if (CSR_READ_4(sc, BGE_MAC_STS) & 5819 BGE_MACSTAT_TBI_PCS_SYNCHED) 5820 ifmr->ifm_status |= IFM_ACTIVE; 5821 ifmr->ifm_active |= IFM_1000_SX; 5822 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 5823 ifmr->ifm_active |= IFM_HDX; 5824 else 5825 ifmr->ifm_active |= IFM_FDX; 5826 return; 5827 } 5828 5829 mii_pollstat(mii); 5830 ifmr->ifm_status = mii->mii_media_status; 5831 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 5832 sc->bge_flowflags; 5833 } 5834 5835 static int 5836 bge_ifflags_cb(struct ethercom *ec) 5837 { 5838 struct ifnet *ifp = &ec->ec_if; 5839 struct bge_softc *sc = ifp->if_softc; 5840 u_short change = ifp->if_flags ^ sc->bge_if_flags; 5841 5842 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 5843 return ENETRESET; 5844 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 5845 return 0; 5846 5847 if ((ifp->if_flags & IFF_PROMISC) == 0) 5848 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5849 else 5850 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5851 5852 bge_setmulti(sc); 5853 5854 sc->bge_if_flags = ifp->if_flags; 5855 return 0; 5856 } 5857 5858 static int 5859 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 5860 { 5861 struct bge_softc *sc = ifp->if_softc; 5862 struct ifreq *ifr = (struct ifreq *) data; 5863 int s, error = 0; 5864 struct mii_data *mii; 5865 5866 s = splnet(); 5867 5868 switch (command) { 5869 case SIOCSIFMEDIA: 5870 /* XXX Flow control is not supported for 1000BASE-SX */ 5871 if (sc->bge_flags & BGEF_FIBER_TBI) { 5872 ifr->ifr_media &= ~IFM_ETH_FMASK; 5873 sc->bge_flowflags = 0; 5874 } 5875 5876 /* Flow control requires full-duplex mode. */ 5877 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 5878 (ifr->ifr_media & IFM_FDX) == 0) { 5879 ifr->ifr_media &= ~IFM_ETH_FMASK; 5880 } 5881 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 5882 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 5883 /* We can do both TXPAUSE and RXPAUSE. */ 5884 ifr->ifr_media |= 5885 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 5886 } 5887 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 5888 } 5889 5890 if (sc->bge_flags & BGEF_FIBER_TBI) { 5891 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 5892 command); 5893 } else { 5894 mii = &sc->bge_mii; 5895 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 5896 command); 5897 } 5898 break; 5899 default: 5900 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 5901 break; 5902 5903 error = 0; 5904 5905 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 5906 ; 5907 else if (ifp->if_flags & IFF_RUNNING) 5908 bge_setmulti(sc); 5909 break; 5910 } 5911 5912 splx(s); 5913 5914 return error; 5915 } 5916 5917 static void 5918 bge_watchdog(struct ifnet *ifp) 5919 { 5920 struct bge_softc *sc; 5921 uint32_t status; 5922 5923 sc = ifp->if_softc; 5924 5925 /* If pause frames are active then don't reset the hardware. */ 5926 if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { 5927 status = CSR_READ_4(sc, BGE_RX_STS); 5928 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { 5929 /* 5930 * If link partner has us in XOFF state then wait for 5931 * the condition to clear. 
5932 */ 5933 CSR_WRITE_4(sc, BGE_RX_STS, status); 5934 ifp->if_timer = 5; 5935 return; 5936 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && 5937 (status & BGE_RXSTAT_RCVD_XON) != 0) { 5938 /* 5939 * If the link partner paused us recently (XOFF then XON), 5940 * give it more time rather than resetting. 5941 */ 5942 CSR_WRITE_4(sc, BGE_RX_STS, status); 5943 ifp->if_timer = 5; 5944 return; 5945 } 5946 /* 5947 * Any other condition is unexpected and the controller 5948 * should be reset. 5949 */ 5950 } 5951 5952 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n"); 5953 5954 ifp->if_flags &= ~IFF_RUNNING; 5955 bge_init(ifp); 5956 5957 if_statinc(ifp, if_oerrors); 5958 } 5959 5960 static void 5961 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 5962 { 5963 int i; 5964 5965 BGE_CLRBIT_FLUSH(sc, reg, bit); 5966 5967 for (i = 0; i < 1000; i++) { 5968 delay(100); 5969 if ((CSR_READ_4(sc, reg) & bit) == 0) 5970 return; 5971 } 5972 5973 /* 5974 * Don't warn when the register is BGE_SRS_MODE: that block is known 5975 * to fail to stop in some environments (and once right after boot?). 5976 */ 5977 if (reg != BGE_SRS_MODE) 5978 aprint_error_dev(sc->bge_dev, 5979 "block failed to stop: reg 0x%lx, bit 0x%08x\n", 5980 (u_long)reg, bit); 5981 } 5982 5983 /* 5984 * Stop the adapter and free any mbufs allocated to the 5985 * RX and TX lists. 5986 */ 5987 static void 5988 bge_stop(struct ifnet *ifp, int disable) 5989 { 5990 struct bge_softc *sc = ifp->if_softc; 5991 5992 if (disable) { 5993 sc->bge_detaching = 1; 5994 callout_halt(&sc->bge_timeout, NULL); 5995 } else 5996 callout_stop(&sc->bge_timeout); 5997 5998 /* Disable host interrupts. */ 5999 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 6000 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 6001 6002 /* 6003 * Tell firmware we're shutting down. 6004 */ 6005 bge_stop_fw(sc); 6006 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 6007 6008 /* 6009 * Disable all of the receiver blocks. 6010 */ 6011 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 6012 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 6013 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 6014 if (BGE_IS_5700_FAMILY(sc)) 6015 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 6016 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 6017 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 6018 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 6019 6020 /* 6021 * Disable all of the transmit blocks. 6022 */ 6023 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 6024 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 6025 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 6026 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 6027 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 6028 if (BGE_IS_5700_FAMILY(sc)) 6029 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 6030 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 6031 6032 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB); 6033 delay(40); 6034 6035 bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 6036 6037 /* 6038 * Shut down all of the memory managers and related 6039 * state machines.
6040 */ 6041 /* 5718 step 5a,5b */ 6042 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 6043 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 6044 if (BGE_IS_5700_FAMILY(sc)) 6045 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 6046 6047 /* 5718 step 5c,5d */ 6048 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 6049 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 6050 6051 if (BGE_IS_5700_FAMILY(sc)) { 6052 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 6053 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 6054 } 6055 6056 bge_reset(sc); 6057 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 6058 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 6059 6060 /* 6061 * Keep the ASF firmware running if up. 6062 */ 6063 if (sc->bge_asf_mode & ASF_STACKUP) 6064 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6065 else 6066 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 6067 6068 /* Free the RX lists. */ 6069 bge_free_rx_ring_std(sc, disable); 6070 6071 /* Free jumbo RX list. */ 6072 if (BGE_IS_JUMBO_CAPABLE(sc)) 6073 bge_free_rx_ring_jumbo(sc); 6074 6075 /* Free TX buffers. */ 6076 bge_free_tx_ring(sc, disable); 6077 6078 /* 6079 * Isolate/power down the PHY. 6080 */ 6081 if (!(sc->bge_flags & BGEF_FIBER_TBI)) 6082 mii_down(&sc->bge_mii); 6083 6084 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 6085 6086 /* Clear MAC's link state (PHY may still have link UP). */ 6087 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6088 6089 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 6090 } 6091 6092 static void 6093 bge_link_upd(struct bge_softc *sc) 6094 { 6095 struct ifnet *ifp = &sc->ethercom.ec_if; 6096 struct mii_data *mii = &sc->bge_mii; 6097 uint32_t status; 6098 uint16_t phyval; 6099 int link; 6100 6101 /* Clear 'pending link event' flag */ 6102 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 6103 6104 /* 6105 * Process link state changes. 6106 * Grrr. The link status word in the status block does 6107 * not work correctly on the BCM5700 rev AX and BX chips, 6108 * according to all available information. Hence, we have 6109 * to enable MII interrupts in order to properly obtain 6110 * async link changes. Unfortunately, this also means that 6111 * we have to read the MAC status register to detect link 6112 * changes, thereby adding an additional register access to 6113 * the interrupt handler. 
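 * (Hence the BCM5700 branch below reads BGE_MAC_STS and services the
 * MII interrupt itself; TBI boards instead check PCS sync, chips with
 * auto-polling read BGE_MI_STS directly, and everything else simply
 * falls through to mii_pollstat().)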
6114 */ 6115 6116 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 6117 status = CSR_READ_4(sc, BGE_MAC_STS); 6118 if (status & BGE_MACSTAT_MI_INTERRUPT) { 6119 mii_pollstat(mii); 6120 6121 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6122 mii->mii_media_status & IFM_ACTIVE && 6123 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6124 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6125 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6126 (!(mii->mii_media_status & IFM_ACTIVE) || 6127 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6128 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6129 6130 /* Clear the interrupt */ 6131 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 6132 BGE_EVTENB_MI_INTERRUPT); 6133 bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, 6134 BRGPHY_MII_ISR, &phyval); 6135 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, 6136 BRGPHY_MII_IMR, BRGPHY_INTRS); 6137 } 6138 return; 6139 } 6140 6141 if (sc->bge_flags & BGEF_FIBER_TBI) { 6142 status = CSR_READ_4(sc, BGE_MAC_STS); 6143 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 6144 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 6145 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6146 if (BGE_ASICREV(sc->bge_chipid) 6147 == BGE_ASICREV_BCM5704) { 6148 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, 6149 BGE_MACMODE_TBI_SEND_CFGS); 6150 DELAY(40); 6151 } 6152 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 6153 if_link_state_change(ifp, LINK_STATE_UP); 6154 } 6155 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 6156 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6157 if_link_state_change(ifp, LINK_STATE_DOWN); 6158 } 6159 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 6160 /* 6161 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 6162 * bit in status word always set. Workaround this bug by 6163 * reading PHY link status directly. 6164 */ 6165 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 6166 BGE_STS_LINK : 0; 6167 6168 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 6169 mii_pollstat(mii); 6170 6171 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6172 mii->mii_media_status & IFM_ACTIVE && 6173 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6174 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6175 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6176 (!(mii->mii_media_status & IFM_ACTIVE) || 6177 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6178 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6179 } 6180 } else { 6181 /* 6182 * For controllers that call mii_tick, we have to poll 6183 * link status. 
6184 */ 6185 mii_pollstat(mii); 6186 } 6187 6188 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 6189 uint32_t reg, scale; 6190 6191 reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) & 6192 BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK; 6193 if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5) 6194 scale = 65; 6195 else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25) 6196 scale = 6; 6197 else 6198 scale = 12; 6199 6200 reg = CSR_READ_4(sc, BGE_MISC_CFG) & 6201 ~BGE_MISCCFG_TIMER_PRESCALER; 6202 reg |= scale << 1; 6203 CSR_WRITE_4(sc, BGE_MISC_CFG, reg); 6204 } 6205 /* Clear the attention */ 6206 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 6207 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 6208 BGE_MACSTAT_LINK_CHANGED); 6209 } 6210 6211 static int 6212 bge_sysctl_verify(SYSCTLFN_ARGS) 6213 { 6214 int error, t; 6215 struct sysctlnode node; 6216 6217 node = *rnode; 6218 t = *(int*)rnode->sysctl_data; 6219 node.sysctl_data = &t; 6220 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6221 if (error || newp == NULL) 6222 return error; 6223 6224 #if 0 6225 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 6226 node.sysctl_num, rnode->sysctl_num)); 6227 #endif 6228 6229 if (node.sysctl_num == bge_rxthresh_nodenum) { 6230 if (t < 0 || t >= NBGE_RX_THRESH) 6231 return EINVAL; 6232 bge_update_all_threshes(t); 6233 } else 6234 return EINVAL; 6235 6236 *(int*)rnode->sysctl_data = t; 6237 6238 return 0; 6239 } 6240 6241 /* 6242 * Set up sysctl(3) MIB, hw.bge.*. 6243 */ 6244 static void 6245 bge_sysctl_init(struct bge_softc *sc) 6246 { 6247 int rc, bge_root_num; 6248 const struct sysctlnode *node; 6249 6250 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 6251 0, CTLTYPE_NODE, "bge", 6252 SYSCTL_DESCR("BGE interface controls"), 6253 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 6254 goto out; 6255 } 6256 6257 bge_root_num = node->sysctl_num; 6258 6259 /* BGE Rx interrupt mitigation level */ 6260 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 6261 CTLFLAG_READWRITE, 6262 CTLTYPE_INT, "rx_lvl", 6263 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 6264 bge_sysctl_verify, 0, 6265 &bge_rx_thresh_lvl, 6266 0, CTL_HW, bge_root_num, CTL_CREATE, 6267 CTL_EOL)) != 0) { 6268 goto out; 6269 } 6270 6271 bge_rxthresh_nodenum = node->sysctl_num; 6272 6273 return; 6274 6275 out: 6276 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 6277 } 6278 6279 #ifdef BGE_DEBUG 6280 void 6281 bge_debug_info(struct bge_softc *sc) 6282 { 6283 6284 printf("Hardware Flags:\n"); 6285 if (BGE_IS_57765_PLUS(sc)) 6286 printf(" - 57765 Plus\n"); 6287 if (BGE_IS_5717_PLUS(sc)) 6288 printf(" - 5717 Plus\n"); 6289 if (BGE_IS_5755_PLUS(sc)) 6290 printf(" - 5755 Plus\n"); 6291 if (BGE_IS_575X_PLUS(sc)) 6292 printf(" - 575X Plus\n"); 6293 if (BGE_IS_5705_PLUS(sc)) 6294 printf(" - 5705 Plus\n"); 6295 if (BGE_IS_5714_FAMILY(sc)) 6296 printf(" - 5714 Family\n"); 6297 if (BGE_IS_5700_FAMILY(sc)) 6298 printf(" - 5700 Family\n"); 6299 if (sc->bge_flags & BGEF_IS_5788) 6300 printf(" - 5788\n"); 6301 if (sc->bge_flags & BGEF_JUMBO_CAPABLE) 6302 printf(" - Supports Jumbo Frames\n"); 6303 if (sc->bge_flags & BGEF_NO_EEPROM) 6304 printf(" - No EEPROM\n"); 6305 if (sc->bge_flags & BGEF_PCIX) 6306 printf(" - PCI-X Bus\n"); 6307 if (sc->bge_flags & BGEF_PCIE) 6308 printf(" - PCI Express Bus\n"); 6309 if (sc->bge_flags & BGEF_RX_ALIGNBUG) 6310 printf(" - RX Alignment Bug\n"); 6311 if (sc->bge_flags & BGEF_APE) 6312 printf(" - APE\n"); 6313 if (sc->bge_flags & BGEF_CPMU_PRESENT) 6314 printf(" - 
CPMU\n"); 6315 if (sc->bge_flags & BGEF_TSO) 6316 printf(" - TSO\n"); 6317 if (sc->bge_flags & BGEF_TAGGED_STATUS) 6318 printf(" - TAGGED_STATUS\n"); 6319 6320 /* PHY related */ 6321 if (sc->bge_phy_flags & BGEPHYF_NO_3LED) 6322 printf(" - No 3 LEDs\n"); 6323 if (sc->bge_phy_flags & BGEPHYF_CRC_BUG) 6324 printf(" - CRC bug\n"); 6325 if (sc->bge_phy_flags & BGEPHYF_ADC_BUG) 6326 printf(" - ADC bug\n"); 6327 if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG) 6328 printf(" - 5704 A0 bug\n"); 6329 if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG) 6330 printf(" - jitter bug\n"); 6331 if (sc->bge_phy_flags & BGEPHYF_BER_BUG) 6332 printf(" - BER bug\n"); 6333 if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM) 6334 printf(" - adjust trim\n"); 6335 if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED) 6336 printf(" - no wirespeed\n"); 6337 6338 /* ASF related */ 6339 if (sc->bge_asf_mode & ASF_ENABLE) 6340 printf(" - ASF enable\n"); 6341 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) 6342 printf(" - ASF new handshake\n"); 6343 if (sc->bge_asf_mode & ASF_STACKUP) 6344 printf(" - ASF stackup\n"); 6345 } 6346 #endif /* BGE_DEBUG */ 6347 6348 static int 6349 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 6350 { 6351 prop_dictionary_t dict; 6352 prop_data_t ea; 6353 6354 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0) 6355 return 1; 6356 6357 dict = device_properties(sc->bge_dev); 6358 ea = prop_dictionary_get(dict, "mac-address"); 6359 if (ea != NULL) { 6360 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 6361 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 6362 memcpy(ether_addr, prop_data_value(ea), ETHER_ADDR_LEN); 6363 return 0; 6364 } 6365 6366 return 1; 6367 } 6368 6369 static int 6370 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 6371 { 6372 uint32_t mac_addr; 6373 6374 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); 6375 if ((mac_addr >> 16) == 0x484b) { 6376 ether_addr[0] = (uint8_t)(mac_addr >> 8); 6377 ether_addr[1] = (uint8_t)mac_addr; 6378 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); 6379 ether_addr[2] = (uint8_t)(mac_addr >> 24); 6380 ether_addr[3] = (uint8_t)(mac_addr >> 16); 6381 ether_addr[4] = (uint8_t)(mac_addr >> 8); 6382 ether_addr[5] = (uint8_t)mac_addr; 6383 return 0; 6384 } 6385 return 1; 6386 } 6387 6388 static int 6389 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 6390 { 6391 int mac_offset = BGE_EE_MAC_OFFSET; 6392 6393 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6394 mac_offset = BGE_EE_MAC_OFFSET_5906; 6395 6396 return (bge_read_nvram(sc, ether_addr, mac_offset + 2, 6397 ETHER_ADDR_LEN)); 6398 } 6399 6400 static int 6401 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 6402 { 6403 6404 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6405 return 1; 6406 6407 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 6408 ETHER_ADDR_LEN)); 6409 } 6410 6411 static int 6412 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 6413 { 6414 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 6415 /* NOTE: Order is critical */ 6416 bge_get_eaddr_fw, 6417 bge_get_eaddr_mem, 6418 bge_get_eaddr_nvram, 6419 bge_get_eaddr_eeprom, 6420 NULL 6421 }; 6422 const bge_eaddr_fcn_t *func; 6423 6424 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 6425 if ((*func)(sc, eaddr) == 0) 6426 break; 6427 } 6428 return (*func == NULL ? ENXIO : 0); 6429 } 6430
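/*
 * Note on the table above: each bge_get_eaddr_*() helper returns 0 on
 * success and nonzero to make bge_get_eaddr() fall through to the next
 * source, so the station address is taken from device properties first,
 * then from the chip's shared memory, then NVRAM, then the EEPROM.
 */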