/*	$NetBSD: if_bge.c,v 1.334 2019/05/29 10:07:29 msaitoh Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.334 2019/05/29 10:07:29 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair. The values were chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve the interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703. On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 16,	1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,	2 },
	{ 50,	4 },
	{ 100,	8 },
	{ 192,	16 },
	{ 416,	32 },
	{ 598,	46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
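
/*
 * A rough worked example of the table above: at 1 Gbit/s line rate
 * with 1500-byte frames (1538 bytes on the wire), a receiver sees
 * about 81,000 packets per second.  The first pair then yields one
 * interrupt per packet, while the last pair (598 ticks, 46 BDs)
 * drops that to roughly 81,000 / 46, i.e. about 1,800 interrupts
 * per second under sustained load.
 */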

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static uint32_t bge_chipid(const struct pci_attach_args *);
static int	bge_can_use_msi(struct bge_softc *);
static int	bge_probe(device_t, cfdata_t, void *);
static void	bge_attach(device_t, device_t, void *);
static int	bge_detach(device_t, int);
static void	bge_release_resources(struct bge_softc *);

static int	bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int	bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void	bge_txeof(struct bge_softc *);
static void	bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static void	bge_rxeof(struct bge_softc *);

static void	bge_asf_driver_up(struct bge_softc *);
static void	bge_tick(void *);
static void	bge_stats_update(struct bge_softc *);
static void	bge_stats_update_regs(struct bge_softc *);
static int	bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int	bge_intr(void *);
static void	bge_start(struct ifnet *);
static int	bge_ifflags_cb(struct ethercom *);
static int	bge_ioctl(struct ifnet *, u_long, void *);
static int	bge_init(struct ifnet *);
static void	bge_stop(struct ifnet *, int);
static void	bge_watchdog(struct ifnet *);
static int	bge_ifmedia_upd(struct ifnet *);
static void	bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t	bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t	bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int	bge_read_eeprom(struct bge_softc *, void *, int, int);
static void	bge_setmulti(struct bge_softc *);

static void	bge_handle_events(struct bge_softc *);
static int	bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void	bge_free_jumbo_mem(struct bge_softc *);
#endif
static void	*bge_jalloc(struct bge_softc *);
static void	bge_jfree(struct mbuf *, void *, size_t, void *);
static int	bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
		    bus_dmamap_t);
static int	bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int	bge_init_rx_ring_std(struct bge_softc *);
static void	bge_free_rx_ring_std(struct bge_softc *, bool);
static int	bge_init_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_rx_ring_jumbo(struct bge_softc *);
static void	bge_free_tx_ring(struct bge_softc *, bool);
static int	bge_init_tx_ring(struct bge_softc *);

static int	bge_chipinit(struct bge_softc *);
static int	bge_blockinit(struct bge_softc *);
static int	bge_phy_addr(struct bge_softc *);
static uint32_t	bge_readmem_ind(struct bge_softc *, int);
static void	bge_writemem_ind(struct bge_softc *, int, int);
static void	bge_writembx(struct bge_softc *, int, int);
static void	bge_writembx_flush(struct bge_softc *, int, int);
static void	bge_writemem_direct(struct bge_softc *, int, int);
static void	bge_writereg_ind(struct bge_softc *, int, int);
static void	bge_set_max_readrq(struct bge_softc *);

static int	bge_miibus_readreg(device_t, int, int, uint16_t *);
static int	bge_miibus_writereg(device_t, int, int, uint16_t);
static void	bge_miibus_statchg(struct ifnet *);

#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
static void	bge_sig_post_reset(struct bge_softc *, int);
static void	bge_sig_legacy(struct bge_softc *, int);
static void	bge_sig_pre_reset(struct bge_softc *, int);
static void	bge_wait_for_event_ack(struct bge_softc *);
static void	bge_stop_fw(struct bge_softc *);
static int	bge_reset(struct bge_softc *);
static void	bge_link_upd(struct bge_softc *);
static void	bge_sysctl_init(struct bge_softc *);
static int	bge_sysctl_verify(SYSCTLFN_PROTO);

static void	bge_ape_lock_init(struct bge_softc *);
static void	bge_ape_read_fw_ver(struct bge_softc *);
static int	bge_ape_lock(struct bge_softc *, int);
static void	bge_ape_unlock(struct bge_softc *, int);
static void	bge_ape_send_event(struct bge_softc *, uint32_t);
static void	bge_ape_driver_state_change(struct bge_softc *, int);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n, x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x; } while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define BGE_EVCNT_INCR(ev)	/* nothing */
#define BGE_EVCNT_ADD(ev, val)	/* nothing */
#define BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif
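
/*
 * Usage sketch for the debug macros above (only compiled in with
 * BGE_DEBUG; the argument is a parenthesized printf argument list):
 *
 *	DPRINTFN(2, ("%s: rx prod %d\n", device_xname(sc->bge_dev), i));
 *
 * The BGE_EVCNT_* macros likewise compile away to nothing unless the
 * kernel is built with BGE_EVENT_COUNTERS.
 */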

#define VIDDID(a, b) PCI_VENDOR_ ## a, PCI_PRODUCT_ ## a ## _ ## b
/*
 * The BCM5700 documentation seems to indicate that the hardware still has the
 * Alteon vendor ID burned into it, though it should always be overridden by
 * the value in the EEPROM. We'll check for it anyway.
 */
static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	{ VIDDID(ALTEON, BCM5700),	"Broadcom BCM5700 Gigabit" },
	{ VIDDID(ALTEON, BCM5701),	"Broadcom BCM5701 Gigabit" },
	{ VIDDID(ALTIMA, AC1000),	"Altima AC1000 Gigabit" },
	{ VIDDID(ALTIMA, AC1001),	"Altima AC1001 Gigabit" },
	{ VIDDID(ALTIMA, AC1003),	"Altima AC1003 Gigabit" },
	{ VIDDID(ALTIMA, AC9100),	"Altima AC9100 Gigabit" },
	{ VIDDID(APPLE, BCM5701),	"APPLE BCM5701 Gigabit" },
	{ VIDDID(BROADCOM, BCM5700),	"Broadcom BCM5700 Gigabit" },
	{ VIDDID(BROADCOM, BCM5701),	"Broadcom BCM5701 Gigabit" },
	{ VIDDID(BROADCOM, BCM5702),	"Broadcom BCM5702 Gigabit" },
	{ VIDDID(BROADCOM, BCM5702FE),	"Broadcom BCM5702FE Fast" },
	{ VIDDID(BROADCOM, BCM5702X),	"Broadcom BCM5702X Gigabit" },
	{ VIDDID(BROADCOM, BCM5703),	"Broadcom BCM5703 Gigabit" },
	{ VIDDID(BROADCOM, BCM5703X),	"Broadcom BCM5703X Gigabit" },
	{ VIDDID(BROADCOM, BCM5703_ALT),"Broadcom BCM5703 Gigabit" },
	{ VIDDID(BROADCOM, BCM5704C),	"Broadcom BCM5704C Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5704S),	"Broadcom BCM5704S Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5704S_ALT),"Broadcom BCM5704S Dual Gigabit" },
	{ VIDDID(BROADCOM, BCM5705),	"Broadcom BCM5705 Gigabit" },
	{ VIDDID(BROADCOM, BCM5705F),	"Broadcom BCM5705F Gigabit" },
	{ VIDDID(BROADCOM, BCM5705K),	"Broadcom BCM5705K Gigabit" },
	{ VIDDID(BROADCOM, BCM5705M),	"Broadcom BCM5705M Gigabit" },
	{ VIDDID(BROADCOM, BCM5705M_ALT),"Broadcom BCM5705M Gigabit" },
	{ VIDDID(BROADCOM, BCM5714),	"Broadcom BCM5714 Gigabit" },
	{ VIDDID(BROADCOM, BCM5714S),	"Broadcom BCM5714S Gigabit" },
	{ VIDDID(BROADCOM, BCM5715),	"Broadcom BCM5715 Gigabit" },
	{ VIDDID(BROADCOM, BCM5715S),	"Broadcom BCM5715S Gigabit" },
	{ VIDDID(BROADCOM, BCM5717),	"Broadcom BCM5717 Gigabit" },
	{ VIDDID(BROADCOM, BCM5717C),	"Broadcom BCM5717 Gigabit" },
	{ VIDDID(BROADCOM, BCM5718),	"Broadcom BCM5718 Gigabit" },
	{ VIDDID(BROADCOM, BCM5719),	"Broadcom BCM5719 Gigabit" },
	{ VIDDID(BROADCOM, BCM5720),	"Broadcom BCM5720 Gigabit" },
	{ VIDDID(BROADCOM, BCM5721),	"Broadcom BCM5721 Gigabit" },
	{ VIDDID(BROADCOM, BCM5722),	"Broadcom BCM5722 Gigabit" },
	{ VIDDID(BROADCOM, BCM5723),	"Broadcom BCM5723 Gigabit" },
	{ VIDDID(BROADCOM, BCM5725),	"Broadcom BCM5725 Gigabit" },
	{ VIDDID(BROADCOM, BCM5727),	"Broadcom BCM5727 Gigabit" },
	{ VIDDID(BROADCOM, BCM5750),	"Broadcom BCM5750 Gigabit" },
	{ VIDDID(BROADCOM, BCM5751),	"Broadcom BCM5751 Gigabit" },
	{ VIDDID(BROADCOM, BCM5751F),	"Broadcom BCM5751F Gigabit" },
	{ VIDDID(BROADCOM, BCM5751M),	"Broadcom BCM5751M Gigabit" },
	{ VIDDID(BROADCOM, BCM5752),	"Broadcom BCM5752 Gigabit" },
	{ VIDDID(BROADCOM, BCM5752M),	"Broadcom BCM5752M Gigabit" },
	{ VIDDID(BROADCOM, BCM5753),	"Broadcom BCM5753 Gigabit" },
	{ VIDDID(BROADCOM, BCM5753F),	"Broadcom BCM5753F Gigabit" },
	{ VIDDID(BROADCOM, BCM5753M),	"Broadcom BCM5753M Gigabit" },
	{ VIDDID(BROADCOM, BCM5754),	"Broadcom BCM5754 Gigabit" },
	{ VIDDID(BROADCOM, BCM5754M),	"Broadcom BCM5754M Gigabit" },
	{ VIDDID(BROADCOM, BCM5755),	"Broadcom BCM5755 Gigabit" },
	{ VIDDID(BROADCOM, BCM5755M),	"Broadcom BCM5755M Gigabit" },
	{ VIDDID(BROADCOM, BCM5756),	"Broadcom BCM5756 Gigabit" },
	{ VIDDID(BROADCOM, BCM5761),	"Broadcom BCM5761 Gigabit" },
	{ VIDDID(BROADCOM, BCM5761E),	"Broadcom BCM5761E Gigabit" },
	{ VIDDID(BROADCOM, BCM5761S),	"Broadcom BCM5761S Gigabit" },
	{ VIDDID(BROADCOM, BCM5761SE),	"Broadcom BCM5761SE Gigabit" },
	{ VIDDID(BROADCOM, BCM5762),	"Broadcom BCM5762 Gigabit" },
	{ VIDDID(BROADCOM, BCM5764),	"Broadcom BCM5764 Gigabit" },
	{ VIDDID(BROADCOM, BCM5780),	"Broadcom BCM5780 Gigabit" },
	{ VIDDID(BROADCOM, BCM5780S),	"Broadcom BCM5780S Gigabit" },
	{ VIDDID(BROADCOM, BCM5781),	"Broadcom BCM5781 Gigabit" },
	{ VIDDID(BROADCOM, BCM5782),	"Broadcom BCM5782 Gigabit" },
	{ VIDDID(BROADCOM, BCM5784M),	"BCM5784M NetLink 1000baseT" },
	{ VIDDID(BROADCOM, BCM5785F),	"BCM5785F NetLink 10/100" },
	{ VIDDID(BROADCOM, BCM5785G),	"BCM5785G NetLink 1000baseT" },
	{ VIDDID(BROADCOM, BCM5786),	"Broadcom BCM5786 Gigabit" },
	{ VIDDID(BROADCOM, BCM5787),	"Broadcom BCM5787 Gigabit" },
	{ VIDDID(BROADCOM, BCM5787F),	"Broadcom BCM5787F 10/100" },
	{ VIDDID(BROADCOM, BCM5787M),	"Broadcom BCM5787M Gigabit" },
	{ VIDDID(BROADCOM, BCM5788),	"Broadcom BCM5788 Gigabit" },
	{ VIDDID(BROADCOM, BCM5789),	"Broadcom BCM5789 Gigabit" },
	{ VIDDID(BROADCOM, BCM5901),	"Broadcom BCM5901 Fast" },
	{ VIDDID(BROADCOM, BCM5901A2),	"Broadcom BCM5901A2 Fast" },
	{ VIDDID(BROADCOM, BCM5903M),	"Broadcom BCM5903M Fast" },
	{ VIDDID(BROADCOM, BCM5906),	"Broadcom BCM5906 Fast" },
	{ VIDDID(BROADCOM, BCM5906M),	"Broadcom BCM5906M Fast" },
	{ VIDDID(BROADCOM, BCM57760),	"Broadcom BCM57760 Gigabit" },
	{ VIDDID(BROADCOM, BCM57761),	"Broadcom BCM57761 Gigabit" },
	{ VIDDID(BROADCOM, BCM57762),	"Broadcom BCM57762 Gigabit" },
	{ VIDDID(BROADCOM, BCM57764),	"Broadcom BCM57764 Gigabit" },
	{ VIDDID(BROADCOM, BCM57765),	"Broadcom BCM57765 Gigabit" },
	{ VIDDID(BROADCOM, BCM57766),	"Broadcom BCM57766 Gigabit" },
	{ VIDDID(BROADCOM, BCM57767),	"Broadcom BCM57767 Gigabit" },
	{ VIDDID(BROADCOM, BCM57780),	"Broadcom BCM57780 Gigabit" },
	{ VIDDID(BROADCOM, BCM57781),	"Broadcom BCM57781 Gigabit" },
	{ VIDDID(BROADCOM, BCM57782),	"Broadcom BCM57782 Gigabit" },
	{ VIDDID(BROADCOM, BCM57785),	"Broadcom BCM57785 Gigabit" },
	{ VIDDID(BROADCOM, BCM57786),	"Broadcom BCM57786 Gigabit" },
	{ VIDDID(BROADCOM, BCM57787),	"Broadcom BCM57787 Gigabit" },
	{ VIDDID(BROADCOM, BCM57788),	"Broadcom BCM57788 Gigabit" },
	{ VIDDID(BROADCOM, BCM57790),	"Broadcom BCM57790 Gigabit" },
	{ VIDDID(BROADCOM, BCM57791),	"Broadcom BCM57791 Gigabit" },
	{ VIDDID(BROADCOM, BCM57795),	"Broadcom BCM57795 Gigabit" },
	{ VIDDID(SCHNEIDERKOCH, SK_9DX1),"SysKonnect SK-9Dx1 Gigabit" },
	{ VIDDID(SCHNEIDERKOCH, SK_9MXX),"SysKonnect SK-9Mxx Gigabit" },
	{ VIDDID(3COM, 3C996),		"3Com 3c996 Gigabit" },
	{ VIDDID(FUJITSU4, PW008GE4),	"Fujitsu PW008GE4 Gigabit" },
	{ VIDDID(FUJITSU4, PW008GE5),	"Fujitsu PW008GE5 Gigabit" },
	{ VIDDID(FUJITSU4, PP250_450_LAN),"Fujitsu Primepower 250/450 Gigabit" },
	{ 0, 0, NULL },
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGEF_JUMBO_CAPABLE)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGEF_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGEF_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGEF_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGEF_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGEF_5755_PLUS)
#define BGE_IS_57765_FAMILY(sc)		((sc)->bge_flags & BGEF_57765_FAMILY)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGEF_57765_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGEF_5717_PLUS)

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
	{ BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
	{ BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
	{ BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
	{ BGE_CHIPID_BCM5762_B0, "BCM5762 B0" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	{ BGE_CHIPID_BCM5784_B0, "BCM5784 B0" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A0, "BCM5906 A0" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
	{ BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
	{ BGE_CHIPID_BCM57766_A0, "BCM57766 A0" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },
	{ BGE_ASICREV_BCM5762, "unknown BCM5762" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, bge_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
}
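
/*
 * The two helpers above implement indirect access to the chip's
 * internal memory: the target offset is written into the PCI memory
 * window base-address register, the data register then accesses the
 * word the window points at, and the window is reset to 0 afterwards.
 * For example, the ASF handshake later in this file uses
 *
 *	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
 *
 * to place a magic value in the firmware mailbox in NIC memory
 * without mapping that memory directly.
 */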

/*
 * PCI Express only
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCIE_DCSR);
	val &= ~PCIE_DCSR_MAX_READ_REQ;
	switch (sc->bge_expmrq) {
	case 2048:
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
		break;
	case 4096:
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		break;
	default:
		panic("incorrect expmrq value (%d)", sc->bge_expmrq);
		break;
	}
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCIE_DCSR, val);
}
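
/*
 * Note on the encoding above: PCIE_DCSR_MAX_READ_REQ masks the
 * standard 3-bit Max_Read_Request_Size field of the PCIe Device
 * Control register (bits 14:12), where 100b selects 2048 bytes and
 * 101b selects 4096 bytes; the BGE_PCIE_DEVCTL_MAX_READRQ_* constants
 * correspond to those encodings shifted into the field.
 */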

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx_flush(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4_FLUSH(sc, off, val);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on this function\n",
		    device_xname(sc->bge_dev));
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	aprint_normal_dev(sc->bge_dev, "APE firmware %s %d.%d.%d.%d\n", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return (0);
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4_FLUSH(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		printf("%s: APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    device_xname(sc->bge_dev),
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}

void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	/* Write and flush for consecutive bge_ape_lock() */
	APE_WRITE_4_FLUSH(sc, gnt + off, bit);
}
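
/*
 * Typical usage of the lock pair above, as in bge_ape_send_event()
 * below: acquire the lock, touch the shared APE state, release the
 * lock, and skip the access entirely if the lock cannot be obtained:
 *
 *	if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
 *		return;
 *	apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
 *	...
 *	bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
 *
 * The lock arbitrates access between the host driver(s) and the APE
 * management firmware, not between kernel threads.
 */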

/*
 * Send an event to the APE firmware.
 */
void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		printf("%s: APE event 0x%08x send timed out\n",
		    device_xname(sc->bge_dev), event);
	}
}

void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
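
	/*
	 * The data register holds the four bytes at the aligned
	 * address with byte 0 in the most-significant lane, so
	 * bswap32() plus the shift below pick out the one byte
	 * requested.  For example, addr = 6 reads the word at 4 and
	 * extracts byte lane 2 ((6 % 4) * 8 = a shift of 16 bits).
	 */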
	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		error = bge_nvram_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t data;
	uint32_t autopoll;
	int rv = 0;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return -1;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		data = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(data & BGE_MICOMM_BUSY)) {
			DELAY(5);
			data = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		rv = ETIMEDOUT;
	} else if ((data & BGE_MICOMM_READFAIL) != 0)
		rv = -1;
	else
		*val = data & BGE_MICOMM_DATA;

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	return rv;
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == MII_GTCR || reg == BRGPHY_MII_AUXCTL))
		return 0;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return -1;

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
		return ETIMEDOUT;
	}

	return 0;
}

static void
bge_miibus_statchg(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags)
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;

	if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
		BGE_STS_SETBIT(sc, BGE_STS_LINK);
	else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
	    (!(mii->mii_media_status & IFM_ACTIVE) ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	if (!BGE_STS_BIT(sc, BGE_STS_LINK))
		return;

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((mii->mii_media_active & IFM_FDX) != 0) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}
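
/*
 * An example of the mapping done above: if autonegotiation resolves
 * to full duplex with both PAUSE directions (sc->bge_flowflags ==
 * (IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE)), the MAC stays in full-duplex
 * mode and both BGE_TXMODE_FLOWCTL_ENABLE and
 * BGE_RXMODE_FLOWCTL_ENABLE are set, so the chip will both emit and
 * honour 802.3x PAUSE frames.  On a half-duplex link the PAUSE bits
 * stay clear and BGE_MACMODE_HALF_DUPLEX is set instead.
 */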

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/*
	 * For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);
}


/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;
	int s;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	s = pserialize_read_enter();
	IFNET_READER_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
	pserialize_read_exit(s);
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */
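
/*
 * Layout of the pool allocated below: one contiguous DMA area of
 * BGE_JMEM bytes is carved into BGE_JSLOTS buffers of BGE_JLEN
 * (roughly 9KB) each, so the constants are expected to satisfy
 * BGE_JMEM = BGE_JSLOTS * BGE_JLEN.  A free/in-use pair of
 * singly-linked lists tracks the slots: bge_jalloc() moves a slot to
 * the in-use list, and bge_jfree(), installed as the mbuf
 * external-storage free callback, moves it back.
 */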
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
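
/*
 * Note on buffer alignment in the two routines below: ETHER_ALIGN (2)
 * shifts the start of the frame so that the 14-byte Ethernet header
 * leaves the IP header 32-bit aligned.  Chips flagged with
 * BGEF_RX_ALIGNBUG reportedly mishandle receive DMA at a 2-byte
 * offset, so for them the m_adj() is skipped and any re-alignment is
 * left to the RX path.
 */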

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL)
		dmamap = sc->bge_cdata.bge_rx_std_map[i];

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ | BUS_DMA_NOWAIT)) {
		m_freem(m_new);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGEF_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
	    BGE_JLEN, BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END | BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGEF_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGEF_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc, bool disable)
{
	int i;

	if (!(sc->bge_flags & BGEF_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			if (disable) {
				bus_dmamap_destroy(sc->bge_dmatag,
				    sc->bge_cdata.bge_rx_std_map[i]);
				sc->bge_cdata.bge_rx_std_map[i] = NULL;
			}
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGEF_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGEF_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGEF_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGEF_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGEF_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc, bool disable)
{
	int i;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGEF_TXRING_VALID))
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1693 link); 1694 sc->txdma[i] = 0; 1695 } 1696 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1697 sizeof(struct bge_tx_bd)); 1698 } 1699 1700 if (disable) { 1701 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1702 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1703 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1704 if (sc->bge_dma64) { 1705 bus_dmamap_destroy(sc->bge_dmatag32, 1706 dma->dmamap32); 1707 } 1708 free(dma, M_DEVBUF); 1709 } 1710 SLIST_INIT(&sc->txdma_list); 1711 } 1712 1713 sc->bge_flags &= ~BGEF_TXRING_VALID; 1714 } 1715 1716 static int 1717 bge_init_tx_ring(struct bge_softc *sc) 1718 { 1719 struct ifnet *ifp = &sc->ethercom.ec_if; 1720 int i; 1721 bus_dmamap_t dmamap, dmamap32; 1722 bus_size_t maxsegsz; 1723 struct txdmamap_pool_entry *dma; 1724 1725 if (sc->bge_flags & BGEF_TXRING_VALID) 1726 return 0; 1727 1728 sc->bge_txcnt = 0; 1729 sc->bge_tx_saved_considx = 0; 1730 1731 /* Initialize transmit producer index for host-memory send ring. */ 1732 sc->bge_tx_prodidx = 0; 1733 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1734 /* 5700 b2 errata */ 1735 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1736 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1737 1738 /* NIC-memory send ring not used; initialize to zero. */ 1739 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1740 /* 5700 b2 errata */ 1741 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1742 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1743 1744 /* Limit DMA segment size for some chips */ 1745 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) && 1746 (ifp->if_mtu <= ETHERMTU)) 1747 maxsegsz = 2048; 1748 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 1749 maxsegsz = 4096; 1750 else 1751 maxsegsz = ETHER_MAX_LEN_JUMBO; 1752 1753 if (SLIST_FIRST(&sc->txdma_list) != NULL) 1754 goto alloc_done; 1755 1756 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1757 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1758 BGE_NTXSEG, maxsegsz, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1759 &dmamap)) 1760 return ENOBUFS; 1761 if (dmamap == NULL) 1762 panic("dmamap NULL in bge_init_tx_ring"); 1763 if (sc->bge_dma64) { 1764 if (bus_dmamap_create(sc->bge_dmatag32, BGE_TXDMA_MAX, 1765 BGE_NTXSEG, maxsegsz, 0, 1766 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1767 &dmamap32)) { 1768 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1769 return ENOBUFS; 1770 } 1771 if (dmamap32 == NULL) 1772 panic("dmamap32 NULL in bge_init_tx_ring"); 1773 } else 1774 dmamap32 = dmamap; 1775 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1776 if (dma == NULL) { 1777 aprint_error_dev(sc->bge_dev, 1778 "can't alloc txdmamap_pool_entry\n"); 1779 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1780 if (sc->bge_dma64) 1781 bus_dmamap_destroy(sc->bge_dmatag32, dmamap32); 1782 return ENOMEM; 1783 } 1784 dma->dmamap = dmamap; 1785 dma->dmamap32 = dmamap32; 1786 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1787 } 1788 alloc_done: 1789 sc->bge_flags |= BGEF_TXRING_VALID; 1790 1791 return 0; 1792 } 1793 1794 static void 1795 bge_setmulti(struct bge_softc *sc) 1796 { 1797 struct ethercom *ec = &sc->ethercom; 1798 struct ifnet *ifp = &ec->ec_if; 1799 struct ether_multi *enm; 1800 struct ether_multistep step; 1801 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1802 uint32_t h; 1803 int i; 1804 1805 if (ifp->if_flags & IFF_PROMISC) 1806 goto allmulti; 1807 1808 /* Now program new ones. 
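Each address is hashed with ether_crc32_le() and only the low 7 bits of the CRC are kept; they select one of 128 filter bits spread across the four 32-bit BGE_MAR registers (bits 6-5 pick the register, bits 4-0 the bit within it; e.g. h == 0x47 sets bit 7 of hashes[2]).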
*/ 1809 ETHER_LOCK(ec); 1810 ETHER_FIRST_MULTI(step, ec, enm); 1811 while (enm != NULL) { 1812 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1813 /* 1814 * We must listen to a range of multicast addresses. 1815 * For now, just accept all multicasts, rather than 1816 * trying to set only those filter bits needed to match 1817 * the range. (At this time, the only use of address 1818 * ranges is for IP multicast routing, for which the 1819 * range is big enough to require all bits set.) 1820 */ 1821 ETHER_UNLOCK(ec); 1822 goto allmulti; 1823 } 1824 1825 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1826 1827 /* Just want the 7 least-significant bits. */ 1828 h &= 0x7f; 1829 1830 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1831 ETHER_NEXT_MULTI(step, enm); 1832 } 1833 ETHER_UNLOCK(ec); 1834 1835 ifp->if_flags &= ~IFF_ALLMULTI; 1836 goto setit; 1837 1838 allmulti: 1839 ifp->if_flags |= IFF_ALLMULTI; 1840 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1841 1842 setit: 1843 for (i = 0; i < 4; i++) 1844 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1845 } 1846 1847 static void 1848 bge_sig_pre_reset(struct bge_softc *sc, int type) 1849 { 1850 1851 /* 1852 * Some chips don't like this so only do this if ASF is enabled 1853 */ 1854 if (sc->bge_asf_mode) 1855 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 1856 1857 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1858 switch (type) { 1859 case BGE_RESET_START: 1860 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1861 BGE_FW_DRV_STATE_START); 1862 break; 1863 case BGE_RESET_SHUTDOWN: 1864 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1865 BGE_FW_DRV_STATE_UNLOAD); 1866 break; 1867 case BGE_RESET_SUSPEND: 1868 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1869 BGE_FW_DRV_STATE_SUSPEND); 1870 break; 1871 } 1872 } 1873 1874 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 1875 bge_ape_driver_state_change(sc, type); 1876 } 1877 1878 static void 1879 bge_sig_post_reset(struct bge_softc *sc, int type) 1880 { 1881 1882 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1883 switch (type) { 1884 case BGE_RESET_START: 1885 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1886 BGE_FW_DRV_STATE_START_DONE); 1887 /* START DONE */ 1888 break; 1889 case BGE_RESET_SHUTDOWN: 1890 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1891 BGE_FW_DRV_STATE_UNLOAD_DONE); 1892 break; 1893 } 1894 } 1895 1896 if (type == BGE_RESET_SHUTDOWN) 1897 bge_ape_driver_state_change(sc, type); 1898 } 1899 1900 static void 1901 bge_sig_legacy(struct bge_softc *sc, int type) 1902 { 1903 1904 if (sc->bge_asf_mode) { 1905 switch (type) { 1906 case BGE_RESET_START: 1907 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1908 BGE_FW_DRV_STATE_START); 1909 break; 1910 case BGE_RESET_SHUTDOWN: 1911 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 1912 BGE_FW_DRV_STATE_UNLOAD); 1913 break; 1914 } 1915 } 1916 } 1917 1918 static void 1919 bge_wait_for_event_ack(struct bge_softc *sc) 1920 { 1921 int i; 1922 1923 /* wait up to 2500usec */ 1924 for (i = 0; i < 250; i++) { 1925 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 1926 BGE_RX_CPU_DRV_EVENT)) 1927 break; 1928 DELAY(10); 1929 } 1930 } 1931 1932 static void 1933 bge_stop_fw(struct bge_softc *sc) 1934 { 1935 1936 if (sc->bge_asf_mode) { 1937 bge_wait_for_event_ack(sc); 1938 1939 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 1940 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 1941 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 1942 1943 bge_wait_for_event_ack(sc); 1944 } 1945 } 1946 1947 static 
int 1948 bge_poll_fw(struct bge_softc *sc) 1949 { 1950 uint32_t val; 1951 int i; 1952 1953 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1954 for (i = 0; i < BGE_TIMEOUT; i++) { 1955 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 1956 if (val & BGE_VCPU_STATUS_INIT_DONE) 1957 break; 1958 DELAY(100); 1959 } 1960 if (i >= BGE_TIMEOUT) { 1961 aprint_error_dev(sc->bge_dev, "reset timed out\n"); 1962 return -1; 1963 } 1964 } else { 1965 /* 1966 * Poll the value location we just wrote until 1967 * we see the 1's complement of the magic number. 1968 * This indicates that the firmware initialization 1969 * is complete. 1970 * XXX 1000ms for Flash and 10000ms for SEEPROM. 1971 */ 1972 for (i = 0; i < BGE_TIMEOUT; i++) { 1973 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); 1974 if (val == ~BGE_SRAM_FW_MB_MAGIC) 1975 break; 1976 DELAY(10); 1977 } 1978 1979 if ((i >= BGE_TIMEOUT) 1980 && ((sc->bge_flags & BGEF_NO_EEPROM) == 0)) { 1981 aprint_error_dev(sc->bge_dev, 1982 "firmware handshake timed out, val = %x\n", val); 1983 return -1; 1984 } 1985 } 1986 1987 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { 1988 /* tg3 says we have to wait extra time */ 1989 delay(10 * 1000); 1990 } 1991 1992 return 0; 1993 } 1994 1995 int 1996 bge_phy_addr(struct bge_softc *sc) 1997 { 1998 struct pci_attach_args *pa = &(sc->bge_pa); 1999 int phy_addr = 1; 2000 2001 /* 2002 * PHY address mapping for various devices. 2003 * 2004 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | 2005 * ---------+-------+-------+-------+-------+ 2006 * BCM57XX | 1 | X | X | X | 2007 * BCM5704 | 1 | X | 1 | X | 2008 * BCM5717 | 1 | 8 | 2 | 9 | 2009 * BCM5719 | 1 | 8 | 2 | 9 | 2010 * BCM5720 | 1 | 8 | 2 | 9 | 2011 * 2012 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | 2013 * ---------+-------+-------+-------+-------+ 2014 * BCM57XX | X | X | X | X | 2015 * BCM5704 | X | X | X | X | 2016 * BCM5717 | X | X | X | X | 2017 * BCM5719 | 3 | 10 | 4 | 11 | 2018 * BCM5720 | X | X | X | X | 2019 * 2020 * Other addresses may respond but they are not 2021 * IEEE compliant PHYs and should be ignored. 2022 */ 2023 switch (BGE_ASICREV(sc->bge_chipid)) { 2024 case BGE_ASICREV_BCM5717: 2025 case BGE_ASICREV_BCM5719: 2026 case BGE_ASICREV_BCM5720: 2027 phy_addr = pa->pa_function; 2028 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { 2029 phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) & 2030 BGE_SGDIGSTS_IS_SERDES) ? 8 : 1; 2031 } else { 2032 phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 2033 BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1; 2034 } 2035 } 2036 2037 return phy_addr; 2038 } 2039 2040 /* 2041 * Do endian, PCI and DMA initialization. Also check the on-board ROM 2042 * self-test results. 2043 */ 2044 static int 2045 bge_chipinit(struct bge_softc *sc) 2046 { 2047 uint32_t dma_rw_ctl, misc_ctl, mode_ctl, reg; 2048 int i; 2049 2050 /* Set endianness before we access any non-PCI registers. */ 2051 misc_ctl = BGE_INIT; 2052 if (sc->bge_flags & BGEF_TAGGED_STATUS) 2053 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; 2054 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 2055 misc_ctl); 2056 2057 /* 2058 * Clear the MAC statistics block in the NIC's 2059 * internal memory. 
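The block is cleared one 32-bit word at a time through the PCI memory window (BGE_MEMWIN_WRITE), and the status block just below is cleared the same way.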
2060 */ 2061 for (i = BGE_STATS_BLOCK; 2062 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 2063 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 2064 2065 for (i = BGE_STATUS_BLOCK; 2066 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 2067 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 2068 2069 /* 5717 workaround from tg3 */ 2070 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) { 2071 /* Save */ 2072 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2073 2074 /* Temporarily modify MODE_CTL to control TLP */ 2075 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2076 CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1); 2077 2078 /* Control TLP */ 2079 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2080 BGE_TLP_PHYCTL1); 2081 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1, 2082 reg | BGE_TLP_PHYCTL1_EN_L1PLLPD); 2083 2084 /* Restore */ 2085 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2086 } 2087 2088 if (BGE_IS_57765_FAMILY(sc)) { 2089 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) { 2090 /* Save */ 2091 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2092 2093 /* Temporarily modify MODE_CTL to control TLP */ 2094 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2095 CSR_WRITE_4(sc, BGE_MODE_CTL, 2096 reg | BGE_MODECTL_PCIE_TLPADDR1); 2097 2098 /* Control TLP */ 2099 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2100 BGE_TLP_PHYCTL5); 2101 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5, 2102 reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ); 2103 2104 /* Restore */ 2105 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2106 } 2107 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) { 2108 /* 2109 * For the 57766 and non-Ax versions of 57765, bootcode 2110 * needs to set up the PCIE Fast Training Sequence (FTS) 2111 * value to prevent transmit hangs. 2112 */ 2113 reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL); 2114 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, 2115 reg | BGE_CPMU_PADRNG_CTL_RDIV2); 2116 2117 /* Save */ 2118 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL); 2119 2120 /* Temporarily modify MODE_CTL to control TLP */ 2121 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK; 2122 CSR_WRITE_4(sc, BGE_MODE_CTL, 2123 reg | BGE_MODECTL_PCIE_TLPADDR0); 2124 2125 /* Control TLP */ 2126 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG + 2127 BGE_TLP_FTSMAX); 2128 reg &= ~BGE_TLP_FTSMAX_MSK; 2129 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX, 2130 reg | BGE_TLP_FTSMAX_VAL); 2131 2132 /* Restore */ 2133 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2134 } 2135 2136 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 2137 reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK; 2138 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 2139 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); 2140 } 2141 2142 /* Set up the PCI DMA control register. */ 2143 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD; 2144 if (sc->bge_flags & BGEF_PCIE) { 2145 /* Read watermark not used, 128 bytes for write. */ 2146 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 2147 device_xname(sc->bge_dev))); 2148 if (sc->bge_mps >= 256) 2149 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 2150 else 2151 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2152 } else if (sc->bge_flags & BGEF_PCIX) { 2153 DPRINTFN(4, ("(%s: PCI-X DMA setting)\n", 2154 device_xname(sc->bge_dev))); 2155 /* PCI-X bus */ 2156 if (BGE_IS_5714_FAMILY(sc)) { 2157 /* 256 bytes for read and write.
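(The watermark fields hold encoded step values, not byte counts; compare the plain-PCI case below, where shift value 7 also selects 256 bytes.)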
*/ 2158 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 2159 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 2160 2161 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 2162 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 2163 else 2164 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 2165 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 2166 /* 2167 * In the BCM5703, the DMA read watermark should 2168 * be set to less than or equal to the maximum 2169 * memory read byte count of the PCI-X command 2170 * register. 2171 */ 2172 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | 2173 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2174 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 2175 /* 1536 bytes for read, 384 bytes for write. */ 2176 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 2177 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 2178 } else { 2179 /* 384 bytes for read and write. */ 2180 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 2181 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 2182 (0x0F); 2183 } 2184 2185 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2186 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 2187 uint32_t tmp; 2188 2189 /* Set ONEDMA_ATONCE for hardware workaround. */ 2190 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 2191 if (tmp == 6 || tmp == 7) 2192 dma_rw_ctl |= 2193 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 2194 2195 /* Set PCI-X DMA write workaround. */ 2196 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 2197 } 2198 } else { 2199 /* Conventional PCI bus: 256 bytes for read and write. */ 2200 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 2201 device_xname(sc->bge_dev))); 2202 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 2203 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 2204 2205 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 && 2206 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750) 2207 dma_rw_ctl |= 0x0F; 2208 } 2209 2210 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2211 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) 2212 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 2213 BGE_PCIDMARWCTL_ASRT_ALL_BE; 2214 2215 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2216 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2217 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 2218 2219 if (BGE_IS_57765_PLUS(sc)) { 2220 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 2221 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) 2222 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 2223 2224 /* 2225 * Enable HW workaround for controllers that misinterpret 2226 * a status tag update and leave interrupts permanently 2227 * disabled. 2228 */ 2229 if (!BGE_IS_57765_FAMILY(sc) && 2230 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2231 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762) 2232 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; 2233 } 2234 2235 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 2236 dma_rw_ctl); 2237 2238 /* 2239 * Set up general mode register. 2240 */ 2241 mode_ctl = BGE_DMA_SWAP_OPTIONS; 2242 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2243 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2244 /* Retain Host-2-BMC settings written by APE firmware. 
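That is, preserve the byte/word-swap and enable bits for the BMC-to-host RX path rather than clobbering what the APE firmware programmed.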
*/ 2245 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & 2246 (BGE_MODECTL_BYTESWAP_B2HRX_DATA | 2247 BGE_MODECTL_WORDSWAP_B2HRX_DATA | 2248 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); 2249 } 2250 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 2251 BGE_MODECTL_TX_NO_PHDR_CSUM; 2252 2253 /* 2254 * BCM5701 B5 has a bug causing data corruption when using 2255 * 64-bit DMA reads, which can be terminated early and then 2256 * completed later as 32-bit accesses, in combination with 2257 * certain bridges. 2258 */ 2259 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 2260 sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 2261 mode_ctl |= BGE_MODECTL_FORCE_PCI32; 2262 2263 /* 2264 * Tell the firmware the driver is running 2265 */ 2266 if (sc->bge_asf_mode & ASF_STACKUP) 2267 mode_ctl |= BGE_MODECTL_STACKUP; 2268 2269 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); 2270 2271 /* 2272 * Disable memory write invalidate. Apparently it is not supported 2273 * properly by these devices. 2274 */ 2275 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, 2276 PCI_COMMAND_INVALIDATE_ENABLE); 2277 2278 #ifdef __brokenalpha__ 2279 /* 2280 * Must ensure that we do not cross an 8K (bytes) boundary 2281 * for DMA reads. Our highest limit is 1K bytes. This is a 2282 * restriction on some ALPHA platforms with early revision 2283 * 21174 PCI chipsets, such as the AlphaPC 164lx 2284 */ 2285 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 2286 #endif 2287 2288 /* Set the timer prescaler (always 66MHz) */ 2289 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 2290 2291 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2292 DELAY(40); /* XXX */ 2293 2294 /* Put PHY into ready state */ 2295 BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 2296 DELAY(40); 2297 } 2298 2299 return 0; 2300 } 2301 2302 static int 2303 bge_blockinit(struct bge_softc *sc) 2304 { 2305 volatile struct bge_rcb *rcb; 2306 bus_size_t rcb_addr; 2307 struct ifnet *ifp = &sc->ethercom.ec_if; 2308 bge_hostaddr taddr; 2309 uint32_t dmactl, rdmareg, mimode, val; 2310 int i, limit; 2311 2312 /* 2313 * Initialize the memory window pointer register so that 2314 * we can access the first 32K of internal NIC RAM. This will 2315 * allow us to set up the TX send ring RCBs and the RX return 2316 * ring RCBs, plus other things which live in NIC memory. 2317 */ 2318 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 2319 2320 if (!BGE_IS_5705_PLUS(sc)) { 2321 /* 57XX step 33 */ 2322 /* Configure mbuf memory pool */ 2323 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 2324 2325 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2326 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 2327 else 2328 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 2329 2330 /* 57XX step 34 */ 2331 /* Configure DMA resource pool */ 2332 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 2333 BGE_DMA_DESCRIPTORS); 2334 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 2335 } 2336 2337 /* 5718 step 11, 57XX step 35 */ 2338 /* 2339 * Configure mbuf pool watermarks. New Broadcom docs strongly 2340 * recommend these.
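The values written below differ by chip family and, on the 5717-plus chips, by whether a jumbo MTU is configured.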
2341 */ 2342 if (BGE_IS_5717_PLUS(sc)) { 2343 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2344 if (ifp->if_mtu > ETHERMTU) { 2345 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); 2346 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); 2347 } else { 2348 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 2349 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 2350 } 2351 } else if (BGE_IS_5705_PLUS(sc)) { 2352 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2353 2354 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2355 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 2356 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 2357 } else { 2358 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 2359 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2360 } 2361 } else { 2362 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 2363 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 2364 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2365 } 2366 2367 /* 57XX step 36 */ 2368 /* Configure DMA resource watermarks */ 2369 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 2370 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 2371 2372 /* 5718 step 13, 57XX step 38 */ 2373 /* Enable buffer manager */ 2374 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN; 2375 /* 2376 * Change the arbitration algorithm of TXMBUF read request to 2377 * round-robin instead of priority based for BCM5719. When 2378 * TXFIFO is almost empty, RDMA will hold its request until 2379 * TXFIFO is not almost empty. 2380 */ 2381 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2382 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 2383 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2384 sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2385 sc->bge_chipid == BGE_CHIPID_BCM5720_A0) 2386 val |= BGE_BMANMODE_LOMBUF_ATTN; 2387 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 2388 2389 /* 57XX step 39 */ 2390 /* Poll for buffer manager start indication */ 2391 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2392 DELAY(10); 2393 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2394 break; 2395 } 2396 2397 if (i == BGE_TIMEOUT * 2) { 2398 aprint_error_dev(sc->bge_dev, 2399 "buffer manager failed to start\n"); 2400 return ENXIO; 2401 } 2402 2403 /* 57XX step 40 */ 2404 /* Enable flow-through queues */ 2405 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2406 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2407 2408 /* Wait until queue initialization is complete */ 2409 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2410 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2411 break; 2412 DELAY(10); 2413 } 2414 2415 if (i == BGE_TIMEOUT * 2) { 2416 aprint_error_dev(sc->bge_dev, 2417 "flow-through queue init failed\n"); 2418 return ENXIO; 2419 } 2420 2421 /* 2422 * Summary of rings supported by the controller: 2423 * 2424 * Standard Receive Producer Ring 2425 * - This ring is used to feed receive buffers for "standard" 2426 * sized frames (typically 1536 bytes) to the controller. 2427 * 2428 * Jumbo Receive Producer Ring 2429 * - This ring is used to feed receive buffers for jumbo sized 2430 * frames (i.e. anything bigger than the "standard" frames) 2431 * to the controller. 2432 * 2433 * Mini Receive Producer Ring 2434 * - This ring is used to feed receive buffers for "mini" 2435 * sized frames to the controller. 2436 * - This feature required external memory for the controller 2437 * but was never used in a production system. Should always 2438 * be disabled. 
2439 * 2440 * Receive Return Ring 2441 * - After the controller has placed an incoming frame into a 2442 * receive buffer, that buffer is moved into a receive return 2443 * ring. The driver is then responsible for passing the 2444 * buffer up to the stack. Many versions of the controller 2445 * support multiple RR rings. 2446 * 2447 * Send Ring 2448 * - This ring is used for outgoing frames. Many versions of 2449 * the controller support multiple send rings. 2450 */ 2451 2452 /* 5718 step 15, 57XX step 41 */ 2453 /* Initialize the standard RX ring control block */ 2454 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2455 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2456 /* 5718 step 16 */ 2457 if (BGE_IS_57765_PLUS(sc)) { 2458 /* 2459 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 2460 * Bits 15-2 : Maximum RX frame size 2461 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2462 * Bit 0 : Reserved 2463 */ 2464 rcb->bge_maxlen_flags = 2465 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2466 } else if (BGE_IS_5705_PLUS(sc)) { 2467 /* 2468 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2469 * Bits 15-2 : Reserved (should be 0) 2470 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2471 * Bit 0 : Reserved 2472 */ 2473 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2474 } else { 2475 /* 2476 * Ring size is always XXX entries 2477 * Bits 31-16: Maximum RX frame size 2478 * Bits 15-2 : Reserved (should be 0) 2479 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2480 * Bit 0 : Reserved 2481 */ 2482 rcb->bge_maxlen_flags = 2483 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2484 } 2485 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2486 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2487 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2488 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2489 else 2490 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2491 /* Write the standard receive producer ring control block. */ 2492 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2493 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2494 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2495 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2496 2497 /* Reset the standard receive producer ring producer index. */ 2498 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2499 2500 /* 57XX step 42 */ 2501 /* 2502 * Initialize the jumbo RX ring control block. 2503 * We set the 'ring disabled' bit in the flags 2504 * field until we're actually ready to start 2505 * using this ring (i.e. once we set the MTU 2506 * high enough to require it). 2507 */ 2508 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2509 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2510 BGE_HOSTADDR(rcb->bge_hostaddr, 2511 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2512 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2513 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2514 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2515 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2516 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2517 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2518 else 2519 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2520 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2521 rcb->bge_hostaddr.bge_addr_hi); 2522 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2523 rcb->bge_hostaddr.bge_addr_lo); 2524 /* Program the jumbo receive producer ring RCB parameters.
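The ring stays flagged 'ring disabled' here; bge_init_rx_ring_jumbo() clears the maxlen/flags word to bring it up once the MTU actually calls for jumbo buffers.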
*/ 2525 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2526 rcb->bge_maxlen_flags); 2527 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2528 /* Reset the jumbo receive producer ring producer index. */ 2529 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2530 } 2531 2532 /* 57XX step 43 */ 2533 /* Disable the mini receive producer ring RCB. */ 2534 if (BGE_IS_5700_FAMILY(sc)) { 2535 /* Set up dummy disabled mini ring RCB */ 2536 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2537 rcb->bge_maxlen_flags = 2538 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 2539 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2540 rcb->bge_maxlen_flags); 2541 /* Reset the mini receive producer ring producer index. */ 2542 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2543 2544 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2545 offsetof(struct bge_ring_data, bge_info), 2546 sizeof (struct bge_gib), 2547 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2548 } 2549 2550 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 2551 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2552 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 2553 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 2554 sc->bge_chipid == BGE_CHIPID_BCM5906_A2) 2555 CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 2556 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 2557 } 2558 /* 5718 step 14, 57XX step 44 */ 2559 /* 2560 * The BD ring replenish thresholds control how often the 2561 * hardware fetches new BDs from the producer rings in host 2562 * memory. Setting the value too low on a busy system can 2563 * starve the hardware and reduce the throughput. 2564 * 2565 * Set the BD ring replenish thresholds. The recommended 2566 * values are 1/8th the number of descriptors allocated to 2567 * each ring, but since we try to avoid filling the entire 2568 * ring we set these to the minimal value of 8. This needs to 2569 * be done on several of the supported chip revisions anyway, 2570 * to work around HW bugs. 2571 */ 2572 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8); 2573 if (BGE_IS_JUMBO_CAPABLE(sc)) 2574 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8); 2575 2576 /* 5718 step 18 */ 2577 if (BGE_IS_5717_PLUS(sc)) { 2578 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2579 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2580 } 2581 2582 /* 57XX step 45 */ 2583 /* 2584 * Disable all send rings by setting the 'ring disabled' bit 2585 * in the flags field of all the TX send ring control blocks, 2586 * located in NIC memory. 2587 */ 2588 if (BGE_IS_5700_FAMILY(sc)) { 2589 /* 5700 to 5704 had 16 send rings.
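The loop below stamps 'ring disabled' into every one of them; only send ring 0 is reprogrammed for use afterwards.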
*/ 2590 limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2591 } else if (BGE_IS_5717_PLUS(sc)) { 2592 limit = BGE_TX_RINGS_5717_MAX; 2593 } else if (BGE_IS_57765_FAMILY(sc) || 2594 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2595 limit = BGE_TX_RINGS_57765_MAX; 2596 } else 2597 limit = 1; 2598 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2599 for (i = 0; i < limit; i++) { 2600 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2601 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2602 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2603 rcb_addr += sizeof(struct bge_rcb); 2604 } 2605 2606 /* 57XX step 46 and 47 */ 2607 /* Configure send ring RCB 0 (we use only the first ring) */ 2608 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2609 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2610 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2611 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2612 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2613 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2614 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2615 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); 2616 else 2617 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2618 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2619 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2620 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2621 2622 /* 57XX step 48 */ 2623 /* 2624 * Disable all receive return rings by setting the 2625 * 'ring disabled' bit in the flags field of all the receive 2626 * return ring control blocks, located in NIC memory. 2627 */ 2628 if (BGE_IS_5717_PLUS(sc)) { 2629 /* Should be 17, use 16 until we get an SRAM map. */ 2630 limit = 16; 2631 } else if (BGE_IS_5700_FAMILY(sc)) 2632 limit = BGE_RX_RINGS_MAX; 2633 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2634 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 || 2635 BGE_IS_57765_FAMILY(sc)) 2636 limit = 4; 2637 else 2638 limit = 1; 2639 /* Disable all receive return rings */ 2640 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2641 for (i = 0; i < limit; i++) { 2642 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2643 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2644 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2645 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2646 BGE_RCB_FLAG_RING_DISABLED)); 2647 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2648 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2649 (i * (sizeof(uint64_t))), 0); 2650 rcb_addr += sizeof(struct bge_rcb); 2651 } 2652 2653 /* 57XX step 49 */ 2654 /* 2655 * Set up receive return ring 0. Note that the NIC address 2656 * for RX return rings is 0x0. The return rings live entirely 2657 * within the host, so the nicaddr field in the RCB isn't used.
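The host learns of newly returned frames via the return-ring producer index in the status block and acknowledges them through the BGE_MBX_RX_CONS0 mailboxes zeroed above.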
2658 */ 2659 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2660 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2661 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2662 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2663 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2664 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2665 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2666 2667 /* 5718 step 24, 57XX step 53 */ 2668 /* Set random backoff seed for TX */ 2669 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2670 (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 2671 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 2672 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) & 2673 BGE_TX_BACKOFF_SEED_MASK); 2674 2675 /* 5718 step 26, 57XX step 55 */ 2676 /* Set inter-packet gap */ 2677 val = 0x2620; 2678 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2679 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2680 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 2681 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 2682 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 2683 2684 /* 5718 step 27, 57XX step 56 */ 2685 /* 2686 * Specify which ring to use for packets that don't match 2687 * any RX rules. 2688 */ 2689 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2690 2691 /* 5718 step 28, 57XX step 57 */ 2692 /* 2693 * Configure number of RX lists. One interrupt distribution 2694 * list, sixteen active lists, one bad frames class. 2695 */ 2696 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2697 2698 /* 5718 step 29, 57XX step 58 */ 2699 /* Initialize RX list placement stats mask. */ 2700 if (BGE_IS_575X_PLUS(sc)) { 2701 val = CSR_READ_4(sc, BGE_RXLP_STATS_ENABLE_MASK); 2702 val &= ~BGE_RXLPSTATCONTROL_DACK_FIX; 2703 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, val); 2704 } else 2705 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2706 2707 /* 5718 step 30, 57XX step 59 */ 2708 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2709 2710 /* 5718 step 33, 57XX step 62 */ 2711 /* Disable host coalescing until we get it set up */ 2712 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2713 2714 /* 5718 step 34, 57XX step 63 */ 2715 /* Poll to make sure it's shut down.
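The engine must be idle before we rewrite its coalescing tick/BD thresholds and block addresses below.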
*/ 2716 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2717 DELAY(10); 2718 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2719 break; 2720 } 2721 2722 if (i == BGE_TIMEOUT * 2) { 2723 aprint_error_dev(sc->bge_dev, 2724 "host coalescing engine failed to idle\n"); 2725 return ENXIO; 2726 } 2727 2728 /* 5718 step 35, 36, 37 */ 2729 /* Set up host coalescing defaults */ 2730 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2731 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2732 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2733 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2734 if (!(BGE_IS_5705_PLUS(sc))) { 2735 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2736 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2737 } 2738 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2739 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2740 2741 /* Set up address of statistics block */ 2742 if (BGE_IS_5700_FAMILY(sc)) { 2743 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2744 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2745 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2746 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2747 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2748 } 2749 2750 /* 5718 step 38 */ 2751 /* Set up address of status block */ 2752 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2753 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2754 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2755 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2756 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2757 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2758 2759 /* Set up status block size. */ 2760 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2761 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2762 val = BGE_STATBLKSZ_FULL; 2763 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2764 } else { 2765 val = BGE_STATBLKSZ_32BYTE; 2766 bzero(&sc->bge_rdata->bge_status_block, 32); 2767 } 2768 2769 /* 5718 step 39, 57XX step 73 */ 2770 /* Turn on host coalescing state machine */ 2771 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2772 2773 /* 5718 step 40, 57XX step 74 */ 2774 /* Turn on RX BD completion state machine and enable attentions */ 2775 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2776 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2777 2778 /* 5718 step 41, 57XX step 75 */ 2779 /* Turn on RX list placement state machine */ 2780 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2781 2782 /* 57XX step 76 */ 2783 /* Turn on RX list selector state machine. */ 2784 if (!(BGE_IS_5705_PLUS(sc))) 2785 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2786 2787 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2788 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2789 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2790 BGE_MACMODE_FRMHDR_DMA_ENB; 2791 2792 if (sc->bge_flags & BGEF_FIBER_TBI) 2793 val |= BGE_PORTMODE_TBI; 2794 else if (sc->bge_flags & BGEF_FIBER_MII) 2795 val |= BGE_PORTMODE_GMII; 2796 else 2797 val |= BGE_PORTMODE_MII; 2798 2799 /* 5718 step 42 and 43, 57XX step 77 and 78 */ 2800 /* Allow APE to send/receive frames. 
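Without these MAC-mode bits the APE management firmware would apparently lose its own network path once the host driver owns the MAC.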
*/ 2801 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2802 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2803 2804 /* Turn on DMA, clear stats */ 2805 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 2806 /* 5718 step 44 */ 2807 DELAY(40); 2808 2809 /* 5718 step 45, 57XX step 79 */ 2810 /* Set misc. local control, enable interrupts on attentions */ 2811 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2812 if (BGE_IS_5717_PLUS(sc)) { 2813 CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */ 2814 /* 5718 step 46 */ 2815 DELAY(100); 2816 } 2817 2818 /* 57XX step 81 */ 2819 /* Turn on DMA completion state machine */ 2820 if (!(BGE_IS_5705_PLUS(sc))) 2821 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2822 2823 /* 5718 step 47, 57XX step 82 */ 2824 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 2825 2826 /* 5718 step 48 */ 2827 /* Enable host coalescing bug fix. */ 2828 if (BGE_IS_5755_PLUS(sc)) 2829 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2830 2831 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 2832 val |= BGE_WDMAMODE_BURST_ALL_DATA; 2833 2834 /* Turn on write DMA state machine */ 2835 CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val); 2836 /* 5718 step 49 */ 2837 DELAY(40); 2838 2839 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2840 2841 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) 2842 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2843 2844 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2845 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2846 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2847 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2848 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2849 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2850 2851 if (sc->bge_flags & BGEF_PCIE) 2852 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2853 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 2854 if (ifp->if_mtu <= ETHERMTU) 2855 val |= BGE_RDMAMODE_JMB_2K_MMRR; 2856 } 2857 if (sc->bge_flags & BGEF_TSO) { 2858 val |= BGE_RDMAMODE_TSO4_ENABLE; 2859 if (BGE_IS_5717_PLUS(sc)) 2860 val |= BGE_RDMAMODE_TSO6_ENABLE; 2861 } 2862 2863 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2864 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2865 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2866 BGE_RDMAMODE_H2BNC_VLAN_DET; 2867 /* 2868 * Allow multiple outstanding read requests from 2869 * non-LSO read DMA engine. 2870 */ 2871 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2872 } 2873 2874 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2875 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2876 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2877 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || 2878 BGE_IS_57765_PLUS(sc)) { 2879 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2880 rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2881 else 2882 rdmareg = BGE_RDMA_RSRVCTRL; 2883 dmactl = CSR_READ_4(sc, rdmareg); 2884 /* 2885 * Adjust TX margin to prevent TX data corruption and 2886 * fix internal FIFO overflow. 2887 */ 2888 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2889 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2890 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2891 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2892 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2893 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2894 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2895 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2896 } 2897 /* 2898 * Enable fix for read DMA FIFO overruns. 2899 * The fix is to limit the number of RX BDs 2900 * the hardware would fetch at a time.
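(That is the BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX bit OR'd into the register below.)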
2901 */ 2902 CSR_WRITE_4(sc, rdmareg, dmactl | 2903 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2904 } 2905 2906 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { 2907 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2908 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2909 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2910 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2911 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2912 /* 2913 * Allow 4KB burst length reads for non-LSO frames. 2914 * Enable 512B burst length reads for buffer descriptors. 2915 */ 2916 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2917 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2918 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2919 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2920 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2921 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2922 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2923 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2924 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2925 } 2926 /* Turn on read DMA state machine */ 2927 CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val); 2928 /* 5718 step 52 */ 2929 delay(40); 2930 2931 if (sc->bge_flags & BGEF_RDMA_BUG) { 2932 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2933 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2934 if ((val & 0xFFFF) > BGE_FRAMELEN) 2935 break; 2936 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) 2937 break; 2938 } 2939 if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2940 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2941 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2942 val |= BGE_RDMA_TX_LENGTH_WA_5719; 2943 else 2944 val |= BGE_RDMA_TX_LENGTH_WA_5720; 2945 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2946 } 2947 } 2948 2949 /* 5718 step 56, 57XX step 84 */ 2950 /* Turn on RX data completion state machine */ 2951 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2952 2953 /* Turn on RX data and RX BD initiator state machine */ 2954 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2955 2956 /* 57XX step 85 */ 2957 /* Turn on Mbuf cluster free state machine */ 2958 if (!BGE_IS_5705_PLUS(sc)) 2959 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2960 2961 /* 5718 step 57, 57XX step 86 */ 2962 /* Turn on send data completion state machine */ 2963 val = BGE_SDCMODE_ENABLE; 2964 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 2965 val |= BGE_SDCMODE_CDELAY; 2966 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2967 2968 /* 5718 step 58 */ 2969 /* Turn on send BD completion state machine */ 2970 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2971 2972 /* 57XX step 88 */ 2973 /* Turn on RX BD initiator state machine */ 2974 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2975 2976 /* 5718 step 60, 57XX step 90 */ 2977 /* Turn on send data initiator state machine */ 2978 if (sc->bge_flags & BGEF_TSO) { 2979 /* XXX: magic value from Linux driver */ 2980 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 2981 BGE_SDIMODE_HW_LSO_PRE_DMA); 2982 } else 2983 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2984 2985 /* 5718 step 61, 57XX step 91 */ 2986 /* Turn on send BD initiator state machine */ 2987 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2988 2989 /* 5718 step 62, 57XX step 92 */ 2990 /* Turn on send BD selector state machine */ 2991 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2992 2993 /* 5718 step 31, 57XX step 60 */ 2994 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 2995 /* 5718 step 32, 57XX step 61 */ 2996 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2997 BGE_SDISTATSCTL_ENABLE | 
BGE_SDISTATSCTL_FASTER); 2998 2999 /* ack/clear link change events */ 3000 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3001 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3002 BGE_MACSTAT_LINK_CHANGED); 3003 CSR_WRITE_4(sc, BGE_MI_STS, 0); 3004 3005 /* 3006 * Enable attention when the link has changed state for 3007 * devices that use auto polling. 3008 */ 3009 if (sc->bge_flags & BGEF_FIBER_TBI) { 3010 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 3011 } else { 3012 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3013 mimode = BGE_MIMODE_500KHZ_CONST; 3014 else 3015 mimode = BGE_MIMODE_BASE; 3016 /* 5718 step 68. 5718 step 69 (optionally). */ 3017 if (BGE_IS_5700_FAMILY(sc) || 3018 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) { 3019 mimode |= BGE_MIMODE_AUTOPOLL; 3020 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 3021 } 3022 mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3023 CSR_WRITE_4(sc, BGE_MI_MODE, mimode); 3024 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 3025 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3026 BGE_EVTENB_MI_INTERRUPT); 3027 } 3028 3029 /* 3030 * Clear any pending link state attention. 3031 * Otherwise some link state change events may be lost until attention 3032 * is cleared by bge_intr() -> bge_link_upd() sequence. 3033 * It's not necessary on newer BCM chips - perhaps enabling link 3034 * state change attentions implies clearing pending attention. 3035 */ 3036 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3037 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3038 BGE_MACSTAT_LINK_CHANGED); 3039 3040 /* Enable link state change attentions. */ 3041 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 3042 3043 return 0; 3044 } 3045 3046 static const struct bge_revision * 3047 bge_lookup_rev(uint32_t chipid) 3048 { 3049 const struct bge_revision *br; 3050 3051 for (br = bge_revisions; br->br_name != NULL; br++) { 3052 if (br->br_chipid == chipid) 3053 return br; 3054 } 3055 3056 for (br = bge_majorrevs; br->br_name != NULL; br++) { 3057 if (br->br_chipid == BGE_ASICREV(chipid)) 3058 return br; 3059 } 3060 3061 return NULL; 3062 } 3063 3064 static const struct bge_product * 3065 bge_lookup(const struct pci_attach_args *pa) 3066 { 3067 const struct bge_product *bp; 3068 3069 for (bp = bge_products; bp->bp_name != NULL; bp++) { 3070 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 3071 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 3072 return bp; 3073 } 3074 3075 return NULL; 3076 } 3077 3078 static uint32_t 3079 bge_chipid(const struct pci_attach_args *pa) 3080 { 3081 uint32_t id; 3082 3083 id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) 3084 >> BGE_PCIMISCCTL_ASICREV_SHIFT; 3085 3086 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { 3087 switch (PCI_PRODUCT(pa->pa_id)) { 3088 case PCI_PRODUCT_BROADCOM_BCM5717: 3089 case PCI_PRODUCT_BROADCOM_BCM5718: 3090 case PCI_PRODUCT_BROADCOM_BCM5719: 3091 case PCI_PRODUCT_BROADCOM_BCM5720: 3092 case PCI_PRODUCT_BROADCOM_BCM5725: 3093 case PCI_PRODUCT_BROADCOM_BCM5727: 3094 case PCI_PRODUCT_BROADCOM_BCM5762: 3095 case PCI_PRODUCT_BROADCOM_BCM57764: 3096 case PCI_PRODUCT_BROADCOM_BCM57767: 3097 case PCI_PRODUCT_BROADCOM_BCM57787: 3098 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3099 BGE_PCI_GEN2_PRODID_ASICREV); 3100 break; 3101 case PCI_PRODUCT_BROADCOM_BCM57761: 3102 case PCI_PRODUCT_BROADCOM_BCM57762: 3103 case PCI_PRODUCT_BROADCOM_BCM57765: 3104 case PCI_PRODUCT_BROADCOM_BCM57766: 3105 case PCI_PRODUCT_BROADCOM_BCM57781: 3106 case PCI_PRODUCT_BROADCOM_BCM57782: 3107 case 
PCI_PRODUCT_BROADCOM_BCM57785: 3108 case PCI_PRODUCT_BROADCOM_BCM57786: 3109 case PCI_PRODUCT_BROADCOM_BCM57791: 3110 case PCI_PRODUCT_BROADCOM_BCM57795: 3111 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3112 BGE_PCI_GEN15_PRODID_ASICREV); 3113 break; 3114 default: 3115 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3116 BGE_PCI_PRODID_ASICREV); 3117 break; 3118 } 3119 } 3120 3121 return id; 3122 } 3123 3124 /* 3125 * Return true if MSI can be used with this device. 3126 */ 3127 static int 3128 bge_can_use_msi(struct bge_softc *sc) 3129 { 3130 int can_use_msi = 0; 3131 3132 switch (BGE_ASICREV(sc->bge_chipid)) { 3133 case BGE_ASICREV_BCM5714_A0: 3134 case BGE_ASICREV_BCM5714: 3135 /* 3136 * Apparently, MSI doesn't work when these chips are 3137 * configured in single-port mode. 3138 */ 3139 break; 3140 case BGE_ASICREV_BCM5750: 3141 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX && 3142 BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX) 3143 can_use_msi = 1; 3144 break; 3145 default: 3146 if (BGE_IS_575X_PLUS(sc)) 3147 can_use_msi = 1; 3148 } 3149 return (can_use_msi); 3150 } 3151 3152 /* 3153 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 3154 * against our list and return its name if we find a match. Note 3155 * that since the Broadcom controller contains VPD support, we 3156 * can get the device name string from the controller itself instead 3157 * of the compiled-in string. This is a little slow, but it guarantees 3158 * we'll always announce the right product name. 3159 */ 3160 static int 3161 bge_probe(device_t parent, cfdata_t match, void *aux) 3162 { 3163 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 3164 3165 if (bge_lookup(pa) != NULL) 3166 return 1; 3167 3168 return 0; 3169 } 3170 3171 static void 3172 bge_attach(device_t parent, device_t self, void *aux) 3173 { 3174 struct bge_softc *sc = device_private(self); 3175 struct pci_attach_args *pa = aux; 3176 prop_dictionary_t dict; 3177 const struct bge_product *bp; 3178 const struct bge_revision *br; 3179 pci_chipset_tag_t pc; 3180 const char *intrstr = NULL; 3181 uint32_t hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5; 3182 uint32_t command; 3183 struct ifnet *ifp; 3184 struct mii_data * const mii = &sc->bge_mii; 3185 uint32_t misccfg, mimode; 3186 void * kva; 3187 u_char eaddr[ETHER_ADDR_LEN]; 3188 pcireg_t memtype, subid, reg; 3189 bus_addr_t memaddr; 3190 uint32_t pm_ctl; 3191 bool no_seeprom; 3192 int capmask; 3193 int mii_flags; 3194 int map_flags; 3195 char intrbuf[PCI_INTRSTR_LEN]; 3196 3197 bp = bge_lookup(pa); 3198 KASSERT(bp != NULL); 3199 3200 sc->sc_pc = pa->pa_pc; 3201 sc->sc_pcitag = pa->pa_tag; 3202 sc->bge_dev = self; 3203 3204 sc->bge_pa = *pa; 3205 pc = sc->sc_pc; 3206 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 3207 3208 aprint_naive(": Ethernet controller\n"); 3209 aprint_normal(": %s Ethernet\n", bp->bp_name); 3210 3211 /* 3212 * Map control/status registers. 
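BAR0 maps the main register block here; chips with an APE additionally get BAR2 mapped further down.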
3213 */ 3214 DPRINTFN(5, ("Map control/status regs\n")); 3215 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3216 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 3217 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 3218 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3219 3220 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 3221 aprint_error_dev(sc->bge_dev, 3222 "failed to enable memory mapping!\n"); 3223 return; 3224 } 3225 3226 DPRINTFN(5, ("pci_mem_find\n")); 3227 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 3228 switch (memtype) { 3229 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3230 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3231 #if 0 3232 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 3233 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 3234 &memaddr, &sc->bge_bsize) == 0) 3235 break; 3236 #else 3237 /* 3238 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3239 * systems get an NMI on boot (PR#48451). This problem might not 3240 * be the driver's bug but our PCI common part's bug. Until we 3241 * find a real reason, we ignore the prefetchable bit. 3242 */ 3243 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0, 3244 memtype, &memaddr, &sc->bge_bsize, &map_flags) == 0) { 3245 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3246 if (bus_space_map(pa->pa_memt, memaddr, sc->bge_bsize, 3247 map_flags, &sc->bge_bhandle) == 0) { 3248 sc->bge_btag = pa->pa_memt; 3249 break; 3250 } 3251 } 3252 #endif 3253 /* FALLTHROUGH */ 3254 default: 3255 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 3256 return; 3257 } 3258 3259 /* Save various chip information. */ 3260 sc->bge_chipid = bge_chipid(pa); 3261 sc->bge_phy_addr = bge_phy_addr(sc); 3262 3263 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 3264 &sc->bge_pciecap, NULL) != 0) { 3265 /* PCIe */ 3266 sc->bge_flags |= BGEF_PCIE; 3267 /* Extract supported maximum payload size. */ 3268 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3269 sc->bge_pciecap + PCIE_DCAP); 3270 sc->bge_mps = 128 << (reg & PCIE_DCAP_MAX_PAYLOAD); 3271 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3272 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 3273 sc->bge_expmrq = 2048; 3274 else 3275 sc->bge_expmrq = 4096; 3276 bge_set_max_readrq(sc); 3277 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) { 3278 /* PCIe without PCIe cap */ 3279 sc->bge_flags |= BGEF_PCIE; 3280 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 3281 BGE_PCISTATE_PCI_BUSMODE) == 0) { 3282 /* PCI-X */ 3283 sc->bge_flags |= BGEF_PCIX; 3284 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, 3285 &sc->bge_pcixcap, NULL) == 0) 3286 aprint_error_dev(sc->bge_dev, 3287 "unable to find PCIX capability\n"); 3288 } 3289 3290 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) { 3291 /* 3292 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 3293 * can clobber the chip's PCI config-space power control 3294 * registers, leaving the card in D3 powersave state. We do 3295 * not have memory-mapped registers in this state, so force 3296 * device into D0 state before starting initialization.
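We cannot trust memory-mapped register access until the power-management write below has returned the chip to D0.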
3297 */ 3298 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 3299 pm_ctl &= ~(PCI_PWR_D0 | PCI_PWR_D1 | PCI_PWR_D2 | PCI_PWR_D3); 3300 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 3301 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 3302 DELAY(1000); /* 27 usec is allegedly sufficient */ 3303 } 3304 3305 /* Save chipset family. */ 3306 switch (BGE_ASICREV(sc->bge_chipid)) { 3307 case BGE_ASICREV_BCM5717: 3308 case BGE_ASICREV_BCM5719: 3309 case BGE_ASICREV_BCM5720: 3310 sc->bge_flags |= BGEF_5717_PLUS; 3311 /* FALLTHROUGH */ 3312 case BGE_ASICREV_BCM5762: 3313 case BGE_ASICREV_BCM57765: 3314 case BGE_ASICREV_BCM57766: 3315 if (!BGE_IS_5717_PLUS(sc)) 3316 sc->bge_flags |= BGEF_57765_FAMILY; 3317 sc->bge_flags |= BGEF_57765_PLUS | BGEF_5755_PLUS | 3318 BGEF_575X_PLUS | BGEF_5705_PLUS | BGEF_JUMBO_CAPABLE; 3319 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 3320 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 3321 /* 3322 * Enable workaround for DMA engine miscalculation 3323 * of TXMBUF available space. 3324 */ 3325 sc->bge_flags |= BGEF_RDMA_BUG; 3326 3327 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3328 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) { 3329 /* Jumbo frame on BCM5719 A0 does not work. */ 3330 sc->bge_flags &= ~BGEF_JUMBO_CAPABLE; 3331 } 3332 } 3333 break; 3334 case BGE_ASICREV_BCM5755: 3335 case BGE_ASICREV_BCM5761: 3336 case BGE_ASICREV_BCM5784: 3337 case BGE_ASICREV_BCM5785: 3338 case BGE_ASICREV_BCM5787: 3339 case BGE_ASICREV_BCM57780: 3340 sc->bge_flags |= BGEF_5755_PLUS | BGEF_575X_PLUS | BGEF_5705_PLUS; 3341 break; 3342 case BGE_ASICREV_BCM5700: 3343 case BGE_ASICREV_BCM5701: 3344 case BGE_ASICREV_BCM5703: 3345 case BGE_ASICREV_BCM5704: 3346 sc->bge_flags |= BGEF_5700_FAMILY | BGEF_JUMBO_CAPABLE; 3347 break; 3348 case BGE_ASICREV_BCM5714_A0: 3349 case BGE_ASICREV_BCM5780: 3350 case BGE_ASICREV_BCM5714: 3351 sc->bge_flags |= BGEF_5714_FAMILY | BGEF_JUMBO_CAPABLE; 3352 /* FALLTHROUGH */ 3353 case BGE_ASICREV_BCM5750: 3354 case BGE_ASICREV_BCM5752: 3355 case BGE_ASICREV_BCM5906: 3356 sc->bge_flags |= BGEF_575X_PLUS; 3357 /* FALLTHROUGH */ 3358 case BGE_ASICREV_BCM5705: 3359 sc->bge_flags |= BGEF_5705_PLUS; 3360 break; 3361 } 3362 3363 /* Identify chips with APE processor. */ 3364 switch (BGE_ASICREV(sc->bge_chipid)) { 3365 case BGE_ASICREV_BCM5717: 3366 case BGE_ASICREV_BCM5719: 3367 case BGE_ASICREV_BCM5720: 3368 case BGE_ASICREV_BCM5761: 3369 case BGE_ASICREV_BCM5762: 3370 sc->bge_flags |= BGEF_APE; 3371 break; 3372 } 3373 3374 /* 3375 * The 40bit DMA bug applies to the 5714/5715 controllers and is 3376 * not actually a MAC controller bug but an issue with the embedded 3377 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 3378 */ 3379 if (BGE_IS_5714_FAMILY(sc) && ((sc->bge_flags & BGEF_PCIX) != 0)) 3380 sc->bge_flags |= BGEF_40BIT_BUG; 3381 3382 /* Chips with APE need BAR2 access for APE registers/memory. */ 3383 if ((sc->bge_flags & BGEF_APE) != 0) { 3384 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2); 3385 #if 0 3386 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0, 3387 &sc->bge_apetag, &sc->bge_apehandle, NULL, 3388 &sc->bge_apesize)) { 3389 aprint_error_dev(sc->bge_dev, 3390 "couldn't map BAR2 memory\n"); 3391 return; 3392 } 3393 #else 3394 /* 3395 * Workaround for PCI prefetchable bit. Some BCM5717-5720 based 3396 * systems get an NMI on boot (PR#48451). This problem might not be 3397 * the driver's bug but our PCI common part's bug.
Until we 3398 * find a real reason, we ignore the prefetchable bit. 3399 */ 3400 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2, 3401 memtype, &memaddr, &sc->bge_apesize, &map_flags) != 0) { 3402 aprint_error_dev(sc->bge_dev, 3403 "couldn't map BAR2 memory\n"); 3404 return; 3405 } 3406 3407 map_flags &= ~BUS_SPACE_MAP_PREFETCHABLE; 3408 if (bus_space_map(pa->pa_memt, memaddr, 3409 sc->bge_apesize, map_flags, &sc->bge_apehandle) != 0) { 3410 aprint_error_dev(sc->bge_dev, 3411 "couldn't map BAR2 memory\n"); 3412 return; 3413 } 3414 sc->bge_apetag = pa->pa_memt; 3415 #endif 3416 3417 /* Enable APE register/memory access by host driver. */ 3418 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE); 3419 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 3420 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 3421 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 3422 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg); 3423 3424 bge_ape_lock_init(sc); 3425 bge_ape_read_fw_ver(sc); 3426 } 3427 3428 /* Identify the chips that use an CPMU. */ 3429 if (BGE_IS_5717_PLUS(sc) || 3430 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3431 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3432 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 3433 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 3434 sc->bge_flags |= BGEF_CPMU_PRESENT; 3435 3436 /* Set MI_MODE */ 3437 mimode = BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 3438 if ((sc->bge_flags & BGEF_CPMU_PRESENT) != 0) 3439 mimode |= BGE_MIMODE_500KHZ_CONST; 3440 else 3441 mimode |= BGE_MIMODE_BASE; 3442 CSR_WRITE_4(sc, BGE_MI_MODE, mimode); 3443 3444 /* 3445 * When using the BCM5701 in PCI-X mode, data corruption has 3446 * been observed in the first few bytes of some received packets. 3447 * Aligning the packet buffer in memory eliminates the corruption. 3448 * Unfortunately, this misaligns the packet payloads. On platforms 3449 * which do not support unaligned accesses, we will realign the 3450 * payloads by copying the received packets. 3451 */ 3452 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 3453 sc->bge_flags & BGEF_PCIX) 3454 sc->bge_flags |= BGEF_RX_ALIGNBUG; 3455 3456 if (BGE_IS_5700_FAMILY(sc)) 3457 sc->bge_flags |= BGEF_JUMBO_CAPABLE; 3458 3459 misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 3460 misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 3461 3462 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3463 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 3464 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 3465 sc->bge_flags |= BGEF_IS_5788; 3466 3467 /* 3468 * Some controllers seem to require a special firmware to use 3469 * TSO. But the firmware is not available to FreeBSD and Linux 3470 * claims that the TSO performed by the firmware is slower than 3471 * hardware based TSO. Moreover the firmware based TSO has one 3472 * known bug which can't handle TSO if ethernet header + IP/TCP 3473 * header is greater than 80 bytes. The workaround for the TSO 3474 * bug exist but it seems it's too expensive than not using 3475 * TSO at all. Some hardwares also have the TSO bug so limit 3476 * the TSO to the controllers that are not affected TSO issues 3477 * (e.g. 5755 or higher). 3478 */ 3479 if (BGE_IS_5755_PLUS(sc)) { 3480 /* 3481 * BCM5754 and BCM5787 shares the same ASIC id so 3482 * explicit device id check is required. 3483 */ 3484 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 3485 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 3486 sc->bge_flags |= BGEF_TSO; 3487 /* TSO on BCM5719 A0 does not work. 
*/ 3488 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) && 3489 (sc->bge_chipid == BGE_CHIPID_BCM5719_A0)) 3490 sc->bge_flags &= ~BGEF_TSO; 3491 } 3492 3493 capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */ 3494 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 3495 (misccfg == 0x4000 || misccfg == 0x8000)) || 3496 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3497 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3498 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 3499 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 3500 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 3501 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3502 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 3503 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 3504 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 3505 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 3506 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 3507 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 3508 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3509 /* These chips are 10/100 only. */ 3510 capmask &= ~BMSR_EXTSTAT; 3511 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3512 } 3513 3514 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3515 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3516 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 3517 sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) 3518 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3519 3520 /* Set various PHY bug flags. */ 3521 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 3522 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 3523 sc->bge_phy_flags |= BGEPHYF_CRC_BUG; 3524 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 3525 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 3526 sc->bge_phy_flags |= BGEPHYF_ADC_BUG; 3527 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 3528 sc->bge_phy_flags |= BGEPHYF_5704_A0_BUG; 3529 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3530 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 3531 PCI_VENDOR(subid) == PCI_VENDOR_DELL) 3532 sc->bge_phy_flags |= BGEPHYF_NO_3LED; 3533 if (BGE_IS_5705_PLUS(sc) && 3534 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 3535 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3536 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 3537 !BGE_IS_57765_PLUS(sc)) { 3538 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 3539 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3540 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3541 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 3542 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 3543 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 3544 sc->bge_phy_flags |= BGEPHYF_JITTER_BUG; 3545 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 3546 sc->bge_phy_flags |= BGEPHYF_ADJUST_TRIM; 3547 } else 3548 sc->bge_phy_flags |= BGEPHYF_BER_BUG; 3549 } 3550 3551 /* 3552 * SEEPROM check. 3553 * First check if firmware knows we do not have SEEPROM. 
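	 * (Machine-dependent code, e.g. a port's device_register() hook,
	 * can set the boolean "without-seeprom" device property to tell
	 * us so.)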
3554 */ 3555 if (prop_dictionary_get_bool(device_properties(self), 3556 "without-seeprom", &no_seeprom) && no_seeprom) 3557 sc->bge_flags |= BGEF_NO_EEPROM; 3558 3559 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 3560 sc->bge_flags |= BGEF_NO_EEPROM; 3561 3562 /* Now check the 'ROM failed' bit on the RX CPU */ 3563 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 3564 sc->bge_flags |= BGEF_NO_EEPROM; 3565 3566 sc->bge_asf_mode = 0; 3567 /* No ASF if APE present. */ 3568 if ((sc->bge_flags & BGEF_APE) == 0) { 3569 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3570 BGE_SRAM_DATA_SIG_MAGIC)) { 3571 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & 3572 BGE_HWCFG_ASF) { 3573 sc->bge_asf_mode |= ASF_ENABLE; 3574 sc->bge_asf_mode |= ASF_STACKUP; 3575 if (BGE_IS_575X_PLUS(sc)) 3576 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 3577 } 3578 } 3579 } 3580 3581 int counts[PCI_INTR_TYPE_SIZE] = { 3582 [PCI_INTR_TYPE_INTX] = 1, 3583 [PCI_INTR_TYPE_MSI] = 1, 3584 [PCI_INTR_TYPE_MSIX] = 1, 3585 }; 3586 int max_type = PCI_INTR_TYPE_MSIX; 3587 3588 if (!bge_can_use_msi(sc)) { 3589 /* MSI broken, allow only INTx */ 3590 max_type = PCI_INTR_TYPE_INTX; 3591 } 3592 3593 if (pci_intr_alloc(pa, &sc->bge_pihp, counts, max_type) != 0) { 3594 aprint_error_dev(sc->bge_dev, "couldn't alloc interrupt\n"); 3595 return; 3596 } 3597 3598 DPRINTFN(5, ("pci_intr_string\n")); 3599 intrstr = pci_intr_string(pc, sc->bge_pihp[0], intrbuf, 3600 sizeof(intrbuf)); 3601 DPRINTFN(5, ("pci_intr_establish\n")); 3602 sc->bge_intrhand = pci_intr_establish_xname(pc, sc->bge_pihp[0], 3603 IPL_NET, bge_intr, sc, device_xname(sc->bge_dev)); 3604 if (sc->bge_intrhand == NULL) { 3605 pci_intr_release(pc, sc->bge_pihp, 1); 3606 sc->bge_pihp = NULL; 3607 3608 aprint_error_dev(self, "couldn't establish interrupt"); 3609 if (intrstr != NULL) 3610 aprint_error(" at %s", intrstr); 3611 aprint_error("\n"); 3612 return; 3613 } 3614 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 3615 3616 switch (pci_intr_type(pc, sc->bge_pihp[0])) { 3617 case PCI_INTR_TYPE_MSIX: 3618 case PCI_INTR_TYPE_MSI: 3619 KASSERT(bge_can_use_msi(sc)); 3620 sc->bge_flags |= BGEF_MSI; 3621 break; 3622 default: 3623 /* nothing to do */ 3624 break; 3625 } 3626 3627 /* 3628 * All controllers except BCM5700 supports tagged status but 3629 * we use tagged status only for MSI case on BCM5717. Otherwise 3630 * MSI on BCM5717 does not work. 3631 */ 3632 if (BGE_IS_57765_PLUS(sc) && sc->bge_flags & BGEF_MSI) 3633 sc->bge_flags |= BGEF_TAGGED_STATUS; 3634 3635 /* 3636 * Reset NVRAM before bge_reset(). It's required to acquire NVRAM 3637 * lock in bge_reset(). 3638 */ 3639 CSR_WRITE_4(sc, BGE_EE_ADDR, 3640 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 3641 delay(1000); 3642 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 3643 3644 bge_stop_fw(sc); 3645 bge_sig_pre_reset(sc, BGE_RESET_START); 3646 if (bge_reset(sc)) 3647 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 3648 3649 /* 3650 * Read the hardware config word in the first 32k of NIC internal 3651 * memory, or fall back to the config word in the EEPROM. 3652 * Note: on some BCM5700 cards, this value appears to be unset. 
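	 * Depending on the chip, up to five config words are gathered below:
	 * BGE_SRAM_DATA_CFG always, _CFG_2 when the SRAM data version looks
	 * sane, _CFG_3 on PCIe parts, _CFG_4 on the BCM5785 and _CFG_5 on
	 * 5717-and-newer parts.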
3653 */ 3654 hwcfg = hwcfg2 = hwcfg3 = hwcfg4 = hwcfg5 = 0; 3655 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3656 BGE_SRAM_DATA_SIG_MAGIC) { 3657 uint32_t tmp; 3658 3659 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); 3660 tmp = bge_readmem_ind(sc, BGE_SRAM_DATA_VER) >> 3661 BGE_SRAM_DATA_VER_SHIFT; 3662 if ((0 < tmp) && (tmp < 0x100)) 3663 hwcfg2 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_2); 3664 if (sc->bge_flags & BGEF_PCIE) 3665 hwcfg3 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_3); 3666 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 3667 hwcfg4 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_4); 3668 if (BGE_IS_5717_PLUS(sc)) 3669 hwcfg5 = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG_5); 3670 } else if (!(sc->bge_flags & BGEF_NO_EEPROM)) { 3671 bge_read_eeprom(sc, (void *)&hwcfg, 3672 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 3673 hwcfg = be32toh(hwcfg); 3674 } 3675 aprint_normal_dev(sc->bge_dev, 3676 "HW config %08x, %08x, %08x, %08x %08x\n", 3677 hwcfg, hwcfg2, hwcfg3, hwcfg4, hwcfg5); 3678 3679 bge_sig_legacy(sc, BGE_RESET_START); 3680 bge_sig_post_reset(sc, BGE_RESET_START); 3681 3682 if (bge_chipinit(sc)) { 3683 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 3684 bge_release_resources(sc); 3685 return; 3686 } 3687 3688 /* 3689 * Get station address from the EEPROM. 3690 */ 3691 if (bge_get_eaddr(sc, eaddr)) { 3692 aprint_error_dev(sc->bge_dev, 3693 "failed to read station address\n"); 3694 bge_release_resources(sc); 3695 return; 3696 } 3697 3698 br = bge_lookup_rev(sc->bge_chipid); 3699 3700 if (br == NULL) { 3701 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 3702 sc->bge_chipid); 3703 } else { 3704 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 3705 br->br_name, sc->bge_chipid); 3706 } 3707 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 3708 3709 /* Allocate the general information block and ring buffers. 
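	 * If the bus is 64-bit DMA capable we keep two tags: the full-width
	 * tag for ring and buffer memory and a 32-bit tag that bge_encap()
	 * can fall back to for TSO; 5714-family parts in PCI-X mode get the
	 * full tag restricted to 40 bits just below.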
*/ 3710 if (pci_dma64_available(pa)) { 3711 sc->bge_dmatag = pa->pa_dmat64; 3712 sc->bge_dmatag32 = pa->pa_dmat; 3713 sc->bge_dma64 = true; 3714 } else { 3715 sc->bge_dmatag = pa->pa_dmat; 3716 sc->bge_dmatag32 = pa->pa_dmat; 3717 sc->bge_dma64 = false; 3718 } 3719 3720 /* 40bit DMA workaround */ 3721 if (sizeof(bus_addr_t) > 4) { 3722 if ((sc->bge_flags & BGEF_40BIT_BUG) != 0) { 3723 bus_dma_tag_t olddmatag = sc->bge_dmatag; /* save */ 3724 3725 if (bus_dmatag_subregion(olddmatag, 0, 3726 (bus_addr_t)(1ULL << 40), &(sc->bge_dmatag), 3727 BUS_DMA_NOWAIT) != 0) { 3728 aprint_error_dev(self, 3729 "WARNING: failed to restrict dma range," 3730 " falling back to parent bus dma range\n"); 3731 sc->bge_dmatag = olddmatag; 3732 } 3733 } 3734 } 3735 SLIST_INIT(&sc->txdma_list); 3736 DPRINTFN(5, ("bus_dmamem_alloc\n")); 3737 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3738 PAGE_SIZE, 0, &sc->bge_ring_seg, 1, 3739 &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) { 3740 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 3741 return; 3742 } 3743 DPRINTFN(5, ("bus_dmamem_map\n")); 3744 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3745 sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva, 3746 BUS_DMA_NOWAIT)) { 3747 aprint_error_dev(sc->bge_dev, 3748 "can't map DMA buffers (%zu bytes)\n", 3749 sizeof(struct bge_ring_data)); 3750 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3751 sc->bge_ring_rseg); 3752 return; 3753 } 3754 DPRINTFN(5, ("bus_dmamem_create\n")); 3755 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3756 sizeof(struct bge_ring_data), 0, 3757 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 3758 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 3759 bus_dmamem_unmap(sc->bge_dmatag, kva, 3760 sizeof(struct bge_ring_data)); 3761 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3762 sc->bge_ring_rseg); 3763 return; 3764 } 3765 DPRINTFN(5, ("bus_dmamem_load\n")); 3766 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3767 sizeof(struct bge_ring_data), NULL, 3768 BUS_DMA_NOWAIT)) { 3769 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3770 bus_dmamem_unmap(sc->bge_dmatag, kva, 3771 sizeof(struct bge_ring_data)); 3772 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3773 sc->bge_ring_rseg); 3774 return; 3775 } 3776 3777 DPRINTFN(5, ("bzero\n")); 3778 sc->bge_rdata = (struct bge_ring_data *)kva; 3779 3780 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 3781 3782 /* Try to allocate memory for jumbo buffers. */ 3783 if (BGE_IS_JUMBO_CAPABLE(sc)) { 3784 if (bge_alloc_jumbo_mem(sc)) { 3785 aprint_error_dev(sc->bge_dev, 3786 "jumbo buffer allocation failed\n"); 3787 } else 3788 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3789 } 3790 3791 /* Set default tuneable values. 
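	 * The coalescing values below are later programmed into the chip's
	 * host coalescing engine: e.g. with bge_rx_coal_ticks = 150 and
	 * bge_rx_max_coal_bds = 64 the chip holds off the RX interrupt
	 * until 64 receive BDs are pending or the coalescing timer (roughly
	 * microsecond-scale ticks) reaches 150, whichever happens first.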
*/ 3792 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3793 sc->bge_rx_coal_ticks = 150; 3794 sc->bge_rx_max_coal_bds = 64; 3795 sc->bge_tx_coal_ticks = 300; 3796 sc->bge_tx_max_coal_bds = 400; 3797 if (BGE_IS_5705_PLUS(sc)) { 3798 sc->bge_tx_coal_ticks = (12 * 5); 3799 sc->bge_tx_max_coal_bds = (12 * 5); 3800 aprint_verbose_dev(sc->bge_dev, 3801 "setting short Tx thresholds\n"); 3802 } 3803 3804 if (BGE_IS_5717_PLUS(sc)) 3805 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3806 else if (BGE_IS_5705_PLUS(sc)) 3807 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3808 else 3809 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3810 3811 /* Set up ifnet structure */ 3812 ifp = &sc->ethercom.ec_if; 3813 ifp->if_softc = sc; 3814 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3815 ifp->if_ioctl = bge_ioctl; 3816 ifp->if_stop = bge_stop; 3817 ifp->if_start = bge_start; 3818 ifp->if_init = bge_init; 3819 ifp->if_watchdog = bge_watchdog; 3820 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 3821 IFQ_SET_READY(&ifp->if_snd); 3822 DPRINTFN(5, ("strcpy if_xname\n")); 3823 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 3824 3825 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3826 sc->ethercom.ec_if.if_capabilities |= 3827 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 3828 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 3829 sc->ethercom.ec_if.if_capabilities |= 3830 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3831 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 3832 #endif 3833 sc->ethercom.ec_capabilities |= 3834 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 3835 3836 if (sc->bge_flags & BGEF_TSO) 3837 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 3838 3839 /* 3840 * Do MII setup. 3841 */ 3842 DPRINTFN(5, ("mii setup\n")); 3843 mii->mii_ifp = ifp; 3844 mii->mii_readreg = bge_miibus_readreg; 3845 mii->mii_writereg = bge_miibus_writereg; 3846 mii->mii_statchg = bge_miibus_statchg; 3847 3848 /* 3849 * Figure out what sort of media we have by checking the hardware 3850 * config word. Note: on some BCM5700 cards, this value appears to be 3851 * unset. If that's the case, we have to rely on identifying the NIC 3852 * by its PCI subsystem ID, as we do below for the SysKonnect SK-9D41. 3853 * The SysKonnect SK-9D41 is a 1000baseSX card. 3854 */ 3855 if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 || 3856 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3857 if (BGE_IS_5705_PLUS(sc)) { 3858 sc->bge_flags |= BGEF_FIBER_MII; 3859 sc->bge_phy_flags |= BGEPHYF_NO_WIRESPEED; 3860 } else 3861 sc->bge_flags |= BGEF_FIBER_TBI; 3862 } 3863 3864 /* Set bge_phy_flags before prop_dictionary_set_uint32() */ 3865 if (BGE_IS_JUMBO_CAPABLE(sc)) 3866 sc->bge_phy_flags |= BGEPHYF_JUMBO_CAPABLE; 3867 3868 /* set phyflags and chipid before mii_attach() */ 3869 dict = device_properties(self); 3870 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_phy_flags); 3871 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid); 3872 3873 /* Initialize ifmedia structures. 
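	 * Two paths below: TBI parts (1000baseSX with no MII PHY) get a
	 * hand-rolled ifmedia limited to 1000_SX and auto, while everything
	 * else goes through the generic mii_attach()/ifmedia path.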
	 */
	if (sc->bge_flags & BGEF_FIBER_TBI) {
		sc->ethercom.ec_ifmedia = &sc->bge_ifmedia;
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
		/* Pretend the user requested this setting. */
		sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
	} else {
		/*
		 * Do transceiver setup and tell the firmware the driver
		 * is down so we can try to get access to probe the PHY
		 * even if ASF is running. Retry a couple of times if we
		 * get a conflict with the ASF firmware accessing the PHY.
		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
		bge_asf_driver_up(sc);

		sc->ethercom.ec_mii = mii;
		ifmedia_init(&mii->mii_media, 0, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		mii_flags = MIIF_DOPAUSE;
		if (sc->bge_flags & BGEF_FIBER_MII)
			mii_flags |= MIIF_HAVEFIBER;
		mii_attach(sc->bge_dev, mii, capmask, sc->bge_phy_addr,
		    MII_OFFSET_ANY, mii_flags);

		if (LIST_EMPTY(&mii->mii_phys)) {
			aprint_error_dev(sc->bge_dev, "no PHY found!\n");
			ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
			    0, NULL);
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
		} else
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

		/*
		 * Now tell the firmware we are going up after probing
		 * the PHY.
		 */
		if (sc->bge_asf_mode & ASF_STACKUP)
			BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	/*
	 * Call the MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp, eaddr);
	ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);
#ifdef BGE_EVENT_COUNTERS
	/*
	 * Attach event counters.
3934 */ 3935 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3936 NULL, device_xname(sc->bge_dev), "intr"); 3937 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious, EVCNT_TYPE_INTR, 3938 NULL, device_xname(sc->bge_dev), "intr_spurious"); 3939 evcnt_attach_dynamic(&sc->bge_ev_intr_spurious2, EVCNT_TYPE_INTR, 3940 NULL, device_xname(sc->bge_dev), "intr_spurious2"); 3941 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3942 NULL, device_xname(sc->bge_dev), "tx_xoff"); 3943 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 3944 NULL, device_xname(sc->bge_dev), "tx_xon"); 3945 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 3946 NULL, device_xname(sc->bge_dev), "rx_xoff"); 3947 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 3948 NULL, device_xname(sc->bge_dev), "rx_xon"); 3949 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 3950 NULL, device_xname(sc->bge_dev), "rx_macctl"); 3951 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 3952 NULL, device_xname(sc->bge_dev), "xoffentered"); 3953 #endif /* BGE_EVENT_COUNTERS */ 3954 DPRINTFN(5, ("callout_init\n")); 3955 callout_init(&sc->bge_timeout, 0); 3956 3957 if (pmf_device_register(self, NULL, NULL)) 3958 pmf_class_network_register(self, ifp); 3959 else 3960 aprint_error_dev(self, "couldn't establish power handler\n"); 3961 3962 bge_sysctl_init(sc); 3963 3964 #ifdef BGE_DEBUG 3965 bge_debug_info(sc); 3966 #endif 3967 } 3968 3969 /* 3970 * Stop all chip I/O so that the kernel's probe routines don't 3971 * get confused by errant DMAs when rebooting. 3972 */ 3973 static int 3974 bge_detach(device_t self, int flags __unused) 3975 { 3976 struct bge_softc *sc = device_private(self); 3977 struct ifnet *ifp = &sc->ethercom.ec_if; 3978 int s; 3979 3980 s = splnet(); 3981 /* Stop the interface. Callouts are stopped in it. */ 3982 bge_stop(ifp, 1); 3983 splx(s); 3984 3985 mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); 3986 3987 /* Delete all remaining media. */ 3988 ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY); 3989 3990 ether_ifdetach(ifp); 3991 if_detach(ifp); 3992 3993 bge_release_resources(sc); 3994 3995 return 0; 3996 } 3997 3998 static void 3999 bge_release_resources(struct bge_softc *sc) 4000 { 4001 4002 /* Detach sysctl */ 4003 if (sc->bge_log != NULL) 4004 sysctl_teardown(&sc->bge_log); 4005 4006 #ifdef BGE_EVENT_COUNTERS 4007 /* Detach event counters. 
*/ 4008 evcnt_detach(&sc->bge_ev_intr); 4009 evcnt_detach(&sc->bge_ev_intr_spurious); 4010 evcnt_detach(&sc->bge_ev_intr_spurious2); 4011 evcnt_detach(&sc->bge_ev_tx_xoff); 4012 evcnt_detach(&sc->bge_ev_tx_xon); 4013 evcnt_detach(&sc->bge_ev_rx_xoff); 4014 evcnt_detach(&sc->bge_ev_rx_xon); 4015 evcnt_detach(&sc->bge_ev_rx_macctl); 4016 evcnt_detach(&sc->bge_ev_xoffentered); 4017 #endif /* BGE_EVENT_COUNTERS */ 4018 4019 /* Disestablish the interrupt handler */ 4020 if (sc->bge_intrhand != NULL) { 4021 pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand); 4022 pci_intr_release(sc->sc_pc, sc->bge_pihp, 1); 4023 sc->bge_intrhand = NULL; 4024 } 4025 4026 if (sc->bge_dmatag != NULL) { 4027 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); 4028 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 4029 bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata, 4030 sizeof(struct bge_ring_data)); 4031 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 4032 sc->bge_ring_rseg); 4033 } 4034 4035 /* Unmap the device registers */ 4036 if (sc->bge_bsize != 0) { 4037 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); 4038 sc->bge_bsize = 0; 4039 } 4040 4041 /* Unmap the APE registers */ 4042 if (sc->bge_apesize != 0) { 4043 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, 4044 sc->bge_apesize); 4045 sc->bge_apesize = 0; 4046 } 4047 } 4048 4049 static int 4050 bge_reset(struct bge_softc *sc) 4051 { 4052 uint32_t cachesize, command; 4053 uint32_t reset, mac_mode, mac_mode_mask; 4054 pcireg_t devctl, reg; 4055 int i, val; 4056 void (*write_op)(struct bge_softc *, int, int); 4057 4058 /* Make mask for BGE_MAC_MODE register. */ 4059 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 4060 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4061 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 4062 /* Keep mac_mode_mask's bits of BGE_MAC_MODE register into mac_mode */ 4063 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 4064 4065 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 4066 (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 4067 if (sc->bge_flags & BGEF_PCIE) 4068 write_op = bge_writemem_direct; 4069 else 4070 write_op = bge_writemem_ind; 4071 } else 4072 write_op = bge_writereg_ind; 4073 4074 /* 57XX step 4 */ 4075 /* Acquire the NVM lock */ 4076 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0 && 4077 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 && 4078 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) { 4079 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 4080 for (i = 0; i < 8000; i++) { 4081 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & 4082 BGE_NVRAMSWARB_GNT1) 4083 break; 4084 DELAY(20); 4085 } 4086 if (i == 8000) { 4087 printf("%s: NVRAM lock timedout!\n", 4088 device_xname(sc->bge_dev)); 4089 } 4090 } 4091 4092 /* Take APE lock when performing reset. */ 4093 bge_ape_lock(sc, BGE_APE_LOCK_GRC); 4094 4095 /* 57XX step 3 */ 4096 /* Save some important PCI state. */ 4097 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 4098 /* 5718 reset step 3 */ 4099 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4100 4101 /* 5718 reset step 5, 57XX step 5b-5d */ 4102 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4103 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4104 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4105 4106 /* XXX ???: Disable fastboot on controllers that support it. 
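	 * (Clearing BGE_FASTBOOT_PC below presumably forces the next reset
	 * to run the full boot ROM instead of the cached fast-boot path.)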
*/ 4107 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 4108 BGE_IS_5755_PLUS(sc)) 4109 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 4110 4111 /* 5718 reset step 2, 57XX step 6 */ 4112 /* 4113 * Write the magic number to SRAM at offset 0xB50. 4114 * When firmware finishes its initialization it will 4115 * write ~BGE_MAGIC_NUMBER to the same location. 4116 */ 4117 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 4118 4119 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) { 4120 val = CSR_READ_4(sc, BGE_PCIE_LINKCTL); 4121 val = (val & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN) 4122 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS; 4123 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, val); 4124 } 4125 4126 /* 5718 reset step 6, 57XX step 7 */ 4127 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 4128 /* 4129 * XXX: from FreeBSD/Linux; no documentation 4130 */ 4131 if (sc->bge_flags & BGEF_PCIE) { 4132 if ((BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) && 4133 !BGE_IS_57765_PLUS(sc) && 4134 (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) == 4135 (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) { 4136 /* PCI Express 1.0 system */ 4137 CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG, 4138 BGE_PHY_PCIE_SCRAM_MODE); 4139 } 4140 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 4141 /* 4142 * Prevent PCI Express link training 4143 * during global reset. 4144 */ 4145 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 4146 reset |= (1 << 29); 4147 } 4148 } 4149 4150 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 4151 i = CSR_READ_4(sc, BGE_VCPU_STATUS); 4152 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 4153 i | BGE_VCPU_STATUS_DRV_RESET); 4154 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 4155 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 4156 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 4157 } 4158 4159 /* 4160 * Set GPHY Power Down Override to leave GPHY 4161 * powered up in D0 uninitialized. 4162 */ 4163 if (BGE_IS_5705_PLUS(sc) && 4164 (sc->bge_flags & BGEF_CPMU_PRESENT) == 0) 4165 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 4166 4167 /* Issue global reset */ 4168 write_op(sc, BGE_MISC_CFG, reset); 4169 4170 /* 5718 reset step 7, 57XX step 8 */ 4171 if (sc->bge_flags & BGEF_PCIE) 4172 delay(100*1000); /* too big */ 4173 else 4174 delay(1000); 4175 4176 if (sc->bge_flags & BGEF_PCIE) { 4177 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 4178 DELAY(500000); 4179 /* XXX: Magic Numbers */ 4180 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4181 BGE_PCI_UNKNOWN0); 4182 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4183 BGE_PCI_UNKNOWN0, 4184 reg | (1 << 15)); 4185 } 4186 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4187 sc->bge_pciecap + PCIE_DCSR); 4188 /* Clear enable no snoop and disable relaxed ordering. */ 4189 devctl &= ~(PCIE_DCSR_ENA_RELAX_ORD | 4190 PCIE_DCSR_ENA_NO_SNOOP); 4191 4192 /* Set PCIE max payload size to 128 for older PCIe devices */ 4193 if ((sc->bge_flags & BGEF_CPMU_PRESENT) == 0) 4194 devctl &= ~(0x00e0); 4195 /* Clear device status register. Write 1b to clear */ 4196 devctl |= PCIE_DCSR_URD | PCIE_DCSR_FED 4197 | PCIE_DCSR_NFED | PCIE_DCSR_CED; 4198 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4199 sc->bge_pciecap + PCIE_DCSR, devctl); 4200 bge_set_max_readrq(sc); 4201 } 4202 4203 /* From Linux: dummy read to flush PCI posted writes */ 4204 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4205 4206 /* 4207 * Reset some of the PCI state that got zapped by reset 4208 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be 4209 * set, too. 
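	 * (Hence the BGE_PCI_MISC_CTL write below repeats the same bits
	 * that were programmed earlier in the reset sequence.)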
4210 */ 4211 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4212 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4213 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4214 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 4215 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && 4216 (sc->bge_flags & BGEF_PCIX) != 0) 4217 val |= BGE_PCISTATE_RETRY_SAME_DMA; 4218 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4219 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 4220 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 4221 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 4222 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val); 4223 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 4224 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 4225 4226 /* 57xx step 11: disable PCI-X Relaxed Ordering. */ 4227 if (sc->bge_flags & BGEF_PCIX) { 4228 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4229 + PCIX_CMD); 4230 /* Set max memory read byte count to 2K */ 4231 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) { 4232 reg &= ~PCIX_CMD_BYTECNT_MASK; 4233 reg |= PCIX_CMD_BCNT_2048; 4234 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704){ 4235 /* 4236 * For 5704, set max outstanding split transaction 4237 * field to 0 (0 means it supports 1 request) 4238 */ 4239 reg &= ~(PCIX_CMD_SPLTRANS_MASK 4240 | PCIX_CMD_BYTECNT_MASK); 4241 reg |= PCIX_CMD_BCNT_2048; 4242 } 4243 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4244 + PCIX_CMD, reg & ~PCIX_CMD_RELAXED_ORDER); 4245 } 4246 4247 /* 5718 reset step 10, 57XX step 12 */ 4248 /* Enable memory arbiter. */ 4249 if (BGE_IS_5714_FAMILY(sc)) { 4250 val = CSR_READ_4(sc, BGE_MARB_MODE); 4251 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 4252 } else 4253 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4254 4255 /* XXX 5721, 5751 and 5752 */ 4256 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) { 4257 /* Step 19: */ 4258 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25); 4259 /* Step 20: */ 4260 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT); 4261 } 4262 4263 /* 5718 reset step 12, 57XX step 15 and 16 */ 4264 /* Fix up byte swapping */ 4265 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 4266 4267 /* 5718 reset step 13, 57XX step 17 */ 4268 /* Poll until the firmware initialization is complete */ 4269 bge_poll_fw(sc); 4270 4271 /* 57XX step 21 */ 4272 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) { 4273 pcireg_t msidata; 4274 4275 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4276 BGE_PCI_MSI_DATA); 4277 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16); 4278 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA, 4279 msidata); 4280 } 4281 4282 /* 57XX step 18 */ 4283 /* Write mac mode. */ 4284 val = CSR_READ_4(sc, BGE_MAC_MODE); 4285 /* Restore mac_mode_mask's bits using mac_mode */ 4286 val = (val & ~mac_mode_mask) | mac_mode; 4287 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val); 4288 DELAY(40); 4289 4290 bge_ape_unlock(sc, BGE_APE_LOCK_GRC); 4291 4292 /* 4293 * The 5704 in TBI mode apparently needs some special 4294 * adjustment to insure the SERDES drive level is set 4295 * to 1.2V. 
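	 * (Only the low twelve bits of BGE_SERDES_CFG are replaced below;
	 * the 0x880 value is apparently inherited from vendor code.)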
4296 */ 4297 if (sc->bge_flags & BGEF_FIBER_TBI && 4298 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 4299 uint32_t serdescfg; 4300 4301 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 4302 serdescfg = (serdescfg & ~0xFFF) | 0x880; 4303 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 4304 } 4305 4306 if (sc->bge_flags & BGEF_PCIE && 4307 !BGE_IS_57765_PLUS(sc) && 4308 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 4309 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) { 4310 uint32_t v; 4311 4312 /* Enable PCI Express bug fix */ 4313 v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG); 4314 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG, 4315 v | BGE_TLP_DATA_FIFO_PROTECT); 4316 } 4317 4318 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 4319 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, 4320 CPMU_CLCK_ORIDE_MAC_ORIDE_EN); 4321 4322 return 0; 4323 } 4324 4325 /* 4326 * Frame reception handling. This is called if there's a frame 4327 * on the receive return list. 4328 * 4329 * Note: we have to be able to handle two possibilities here: 4330 * 1) the frame is from the jumbo receive ring 4331 * 2) the frame is from the standard receive ring 4332 */ 4333 4334 static void 4335 bge_rxeof(struct bge_softc *sc) 4336 { 4337 struct ifnet *ifp; 4338 uint16_t rx_prod, rx_cons; 4339 int stdcnt = 0, jumbocnt = 0; 4340 bus_dmamap_t dmamap; 4341 bus_addr_t offset, toff; 4342 bus_size_t tlen; 4343 int tosync; 4344 4345 rx_cons = sc->bge_rx_saved_considx; 4346 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 4347 4348 /* Nothing to do */ 4349 if (rx_cons == rx_prod) 4350 return; 4351 4352 ifp = &sc->ethercom.ec_if; 4353 4354 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4355 offsetof(struct bge_ring_data, bge_status_block), 4356 sizeof (struct bge_status_block), 4357 BUS_DMASYNC_POSTREAD); 4358 4359 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 4360 tosync = rx_prod - rx_cons; 4361 4362 if (tosync != 0) 4363 rnd_add_uint32(&sc->rnd_source, tosync); 4364 4365 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 4366 4367 if (tosync < 0) { 4368 tlen = (sc->bge_return_ring_cnt - rx_cons) * 4369 sizeof (struct bge_rx_bd); 4370 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4371 toff, tlen, BUS_DMASYNC_POSTREAD); 4372 tosync = -tosync; 4373 } 4374 4375 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4376 offset, tosync * sizeof (struct bge_rx_bd), 4377 BUS_DMASYNC_POSTREAD); 4378 4379 while (rx_cons != rx_prod) { 4380 struct bge_rx_bd *cur_rx; 4381 uint32_t rxidx; 4382 struct mbuf *m = NULL; 4383 4384 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 4385 4386 rxidx = cur_rx->bge_idx; 4387 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 4388 4389 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 4390 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 4391 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 4392 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 4393 jumbocnt++; 4394 bus_dmamap_sync(sc->bge_dmatag, 4395 sc->bge_cdata.bge_rx_jumbo_map, 4396 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 4397 BGE_JLEN, BUS_DMASYNC_POSTREAD); 4398 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4399 ifp->if_ierrors++; 4400 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4401 continue; 4402 } 4403 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 4404 NULL)== ENOBUFS) { 4405 ifp->if_ierrors++; 4406 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4407 continue; 4408 } 4409 } else { 4410 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 4411 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 4412 4413 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 
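			/*
			 * Standard-ring path: recover the dmamap loaded for
			 * this slot, sync and unload it, and try to refill
			 * the slot with a fresh mbuf; on failure the old
			 * mbuf is recycled so the ring never loses a buffer.
			 */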
4414 stdcnt++; 4415 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 4416 sc->bge_cdata.bge_rx_std_map[rxidx] = NULL; 4417 if (dmamap == NULL) { 4418 ifp->if_ierrors++; 4419 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4420 continue; 4421 } 4422 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 4423 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 4424 bus_dmamap_unload(sc->bge_dmatag, dmamap); 4425 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4426 ifp->if_ierrors++; 4427 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4428 continue; 4429 } 4430 if (bge_newbuf_std(sc, sc->bge_std, 4431 NULL, dmamap) == ENOBUFS) { 4432 ifp->if_ierrors++; 4433 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4434 continue; 4435 } 4436 } 4437 4438 #ifndef __NO_STRICT_ALIGNMENT 4439 /* 4440 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 4441 * the Rx buffer has the layer-2 header unaligned. 4442 * If our CPU requires alignment, re-align by copying. 4443 */ 4444 if (sc->bge_flags & BGEF_RX_ALIGNBUG) { 4445 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 4446 cur_rx->bge_len); 4447 m->m_data += ETHER_ALIGN; 4448 } 4449 #endif 4450 4451 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 4452 m_set_rcvif(m, ifp); 4453 4454 bge_rxcsum(sc, cur_rx, m); 4455 4456 /* 4457 * If we received a packet with a vlan tag, pass it 4458 * to vlan_input() instead of ether_input(). 4459 */ 4460 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) 4461 vlan_set_tag(m, cur_rx->bge_vlan_tag); 4462 4463 if_percpuq_enqueue(ifp->if_percpuq, m); 4464 } 4465 4466 sc->bge_rx_saved_considx = rx_cons; 4467 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 4468 if (stdcnt) 4469 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 4470 if (jumbocnt) 4471 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 4472 } 4473 4474 static void 4475 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) 4476 { 4477 4478 if (BGE_IS_57765_PLUS(sc)) { 4479 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 4480 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4481 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4482 if ((cur_rx->bge_error_flag & 4483 BGE_RXERRFLAG_IP_CSUM_NOK) != 0) 4484 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4485 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 4486 m->m_pkthdr.csum_data = 4487 cur_rx->bge_tcp_udp_csum; 4488 m->m_pkthdr.csum_flags |= 4489 (M_CSUM_TCPv4 | M_CSUM_UDPv4 |M_CSUM_DATA); 4490 } 4491 } 4492 } else { 4493 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4494 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4495 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 4496 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4497 /* 4498 * Rx transport checksum-offload may also 4499 * have bugs with packets which, when transmitted, 4500 * were `runts' requiring padding. 
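		 * The ETHER_MIN_NOPAD test below therefore declines to
		 * trust the hardware TCP/UDP sum for anything short enough
		 * to have been a padded runt on the wire (e.g. a 54-byte
		 * pure ACK).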
4501 */ 4502 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 4503 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 4504 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 4505 m->m_pkthdr.csum_data = 4506 cur_rx->bge_tcp_udp_csum; 4507 m->m_pkthdr.csum_flags |= 4508 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_DATA); 4509 } 4510 } 4511 } 4512 4513 static void 4514 bge_txeof(struct bge_softc *sc) 4515 { 4516 struct bge_tx_bd *cur_tx = NULL; 4517 struct ifnet *ifp; 4518 struct txdmamap_pool_entry *dma; 4519 bus_addr_t offset, toff; 4520 bus_size_t tlen; 4521 int tosync; 4522 struct mbuf *m; 4523 4524 ifp = &sc->ethercom.ec_if; 4525 4526 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4527 offsetof(struct bge_ring_data, bge_status_block), 4528 sizeof (struct bge_status_block), 4529 BUS_DMASYNC_POSTREAD); 4530 4531 offset = offsetof(struct bge_ring_data, bge_tx_ring); 4532 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 4533 sc->bge_tx_saved_considx; 4534 4535 if (tosync != 0) 4536 rnd_add_uint32(&sc->rnd_source, tosync); 4537 4538 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 4539 4540 if (tosync < 0) { 4541 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 4542 sizeof (struct bge_tx_bd); 4543 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4544 toff, tlen, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4545 tosync = -tosync; 4546 } 4547 4548 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4549 offset, tosync * sizeof (struct bge_tx_bd), 4550 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4551 4552 /* 4553 * Go through our tx ring and free mbufs for those 4554 * frames that have been sent. 4555 */ 4556 while (sc->bge_tx_saved_considx != 4557 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 4558 uint32_t idx = 0; 4559 4560 idx = sc->bge_tx_saved_considx; 4561 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 4562 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 4563 ifp->if_opackets++; 4564 m = sc->bge_cdata.bge_tx_chain[idx]; 4565 if (m != NULL) { 4566 sc->bge_cdata.bge_tx_chain[idx] = NULL; 4567 dma = sc->txdma[idx]; 4568 if (dma->is_dma32) { 4569 bus_dmamap_sync(sc->bge_dmatag32, dma->dmamap32, 4570 0, dma->dmamap32->dm_mapsize, 4571 BUS_DMASYNC_POSTWRITE); 4572 bus_dmamap_unload( 4573 sc->bge_dmatag32, dma->dmamap32); 4574 } else { 4575 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 4576 0, dma->dmamap->dm_mapsize, 4577 BUS_DMASYNC_POSTWRITE); 4578 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 4579 } 4580 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 4581 sc->txdma[idx] = NULL; 4582 4583 m_freem(m); 4584 } 4585 sc->bge_txcnt--; 4586 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 4587 ifp->if_timer = 0; 4588 } 4589 4590 if (cur_tx != NULL) 4591 ifp->if_flags &= ~IFF_OACTIVE; 4592 } 4593 4594 static int 4595 bge_intr(void *xsc) 4596 { 4597 struct bge_softc *sc; 4598 struct ifnet *ifp; 4599 uint32_t pcistate, statusword, statustag; 4600 uint32_t intrmask = BGE_PCISTATE_INTR_NOT_ACTIVE; 4601 4602 sc = xsc; 4603 ifp = &sc->ethercom.ec_if; 4604 4605 /* 5717 and newer chips have no BGE_PCISTATE_INTR_NOT_ACTIVE bit */ 4606 if (BGE_IS_5717_PLUS(sc)) 4607 intrmask = 0; 4608 4609 /* It is possible for the interrupt to arrive before 4610 * the status block is updated prior to the interrupt. 4611 * Reading the PCI State register will confirm whether the 4612 * interrupt is ours and will flush the status block. 
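	 * When tagged status is in use (BGEF_TAGGED_STATUS, below) we
	 * compare the status tag against the last one seen instead, and
	 * writing that tag back to the mailbox at the end acknowledges
	 * exactly the work we observed.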
4613 */ 4614 pcistate = CSR_READ_4(sc, BGE_PCI_PCISTATE); 4615 4616 /* read status word from status block */ 4617 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4618 offsetof(struct bge_ring_data, bge_status_block), 4619 sizeof (struct bge_status_block), 4620 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 4621 statusword = sc->bge_rdata->bge_status_block.bge_status; 4622 statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24; 4623 4624 if (sc->bge_flags & BGEF_TAGGED_STATUS) { 4625 if (sc->bge_lasttag == statustag && 4626 (~pcistate & intrmask)) { 4627 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious); 4628 return (0); 4629 } 4630 sc->bge_lasttag = statustag; 4631 } else { 4632 if (!(statusword & BGE_STATFLAG_UPDATED) && 4633 !(~pcistate & intrmask)) { 4634 BGE_EVCNT_INCR(sc->bge_ev_intr_spurious2); 4635 return (0); 4636 } 4637 statustag = 0; 4638 } 4639 /* Ack interrupt and stop others from occurring. */ 4640 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 4641 BGE_EVCNT_INCR(sc->bge_ev_intr); 4642 4643 /* clear status word */ 4644 sc->bge_rdata->bge_status_block.bge_status = 0; 4645 4646 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4647 offsetof(struct bge_ring_data, bge_status_block), 4648 sizeof (struct bge_status_block), 4649 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4650 4651 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4652 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 4653 BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 4654 bge_link_upd(sc); 4655 4656 if (ifp->if_flags & IFF_RUNNING) { 4657 /* Check RX return ring producer/consumer */ 4658 bge_rxeof(sc); 4659 4660 /* Check TX ring producer/consumer */ 4661 bge_txeof(sc); 4662 } 4663 4664 if (sc->bge_pending_rxintr_change) { 4665 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 4666 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 4667 4668 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 4669 DELAY(10); 4670 (void)CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 4671 4672 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 4673 DELAY(10); 4674 (void)CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 4675 4676 sc->bge_pending_rxintr_change = 0; 4677 } 4678 bge_handle_events(sc); 4679 4680 /* Re-enable interrupts. */ 4681 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, statustag); 4682 4683 if (ifp->if_flags & IFF_RUNNING) 4684 if_schedule_deferred_start(ifp); 4685 4686 return 1; 4687 } 4688 4689 static void 4690 bge_asf_driver_up(struct bge_softc *sc) 4691 { 4692 if (sc->bge_asf_mode & ASF_STACKUP) { 4693 /* Send ASF heartbeat aprox. every 2s */ 4694 if (sc->bge_asf_count) 4695 sc->bge_asf_count --; 4696 else { 4697 sc->bge_asf_count = 2; 4698 4699 bge_wait_for_event_ack(sc); 4700 4701 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, 4702 BGE_FW_CMD_DRV_ALIVE3); 4703 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); 4704 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, 4705 BGE_FW_HB_TIMEOUT_SEC); 4706 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 4707 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | 4708 BGE_RX_CPU_DRV_EVENT); 4709 } 4710 } 4711 } 4712 4713 static void 4714 bge_tick(void *xsc) 4715 { 4716 struct bge_softc *sc = xsc; 4717 struct mii_data *mii = &sc->bge_mii; 4718 int s; 4719 4720 s = splnet(); 4721 4722 if (BGE_IS_5705_PLUS(sc)) 4723 bge_stats_update_regs(sc); 4724 else 4725 bge_stats_update(sc); 4726 4727 if (sc->bge_flags & BGEF_FIBER_TBI) { 4728 /* 4729 * Since in TBI mode auto-polling can't be used we should poll 4730 * link status manually. Here we register pending link event 4731 * and trigger interrupt. 
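		 * (BGE_MLC_INTR_SET below raises a self-interrupt, so
		 * bge_intr() will see BGE_STS_LINK_EVT and call
		 * bge_link_upd() for us.)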
		 */
		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else {
		/*
		 * Do not touch the PHY if we have link up. This could
		 * break IPMI/ASF mode or produce extra input errors.
		 * (Extra input errors were reported for bcm5701 & bcm5704.)
		 */
		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
			mii_tick(mii);
	}

	bge_asf_driver_up(sc);

	if (!sc->bge_detaching)
		callout_reset(&sc->bge_timeout, hz, bge_tick, sc);

	splx(s);
}

static void
bge_stats_update_regs(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;

	ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
	    offsetof(struct bge_mac_stats_regs, etherStatsCollisions));

	/*
	 * On BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0,
	 * RXLP_LOCSTAT_IFIN_DROPS includes unwanted multicast frames
	 * (silicon bug). There is no reliable workaround, so just
	 * ignore the counter.
	 */
	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5720_A0) {
		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
	}
	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
	ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);

	if (sc->bge_flags & BGEF_RDMA_BUG) {
		uint32_t val, ucast, mcast, bcast;

		ucast = CSR_READ_4(sc, BGE_MAC_STATS +
		    offsetof(struct bge_mac_stats_regs, ifHCOutUcastPkts));
		mcast = CSR_READ_4(sc, BGE_MAC_STATS +
		    offsetof(struct bge_mac_stats_regs, ifHCOutMulticastPkts));
		bcast = CSR_READ_4(sc, BGE_MAC_STATS +
		    offsetof(struct bge_mac_stats_regs, ifHCOutBroadcastPkts));

		/*
		 * If the controller has transmitted more than
		 * BGE_NUM_RDMA_CHANNELS frames, it is safe to disable the
		 * workaround for the DMA engine's miscalculation of
		 * TXMBUF space.
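		 * The test is coarse: the ifHCOut* counters are cumulative,
		 * so once the NIC has ever sent more than
		 * BGE_NUM_RDMA_CHANNELS frames the workaround bit is
		 * dropped and BGEF_RDMA_BUG stays clear until the next
		 * attach.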
4789 */ 4790 if (ucast + mcast + bcast > BGE_NUM_RDMA_CHANNELS) { 4791 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 4792 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 4793 val &= ~BGE_RDMA_TX_LENGTH_WA_5719; 4794 else 4795 val &= ~BGE_RDMA_TX_LENGTH_WA_5720; 4796 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 4797 sc->bge_flags &= ~BGEF_RDMA_BUG; 4798 } 4799 } 4800 } 4801 4802 static void 4803 bge_stats_update(struct bge_softc *sc) 4804 { 4805 struct ifnet *ifp = &sc->ethercom.ec_if; 4806 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 4807 4808 #define READ_STAT(sc, stats, stat) \ 4809 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 4810 4811 ifp->if_collisions += 4812 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 4813 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 4814 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 4815 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 4816 ifp->if_collisions; 4817 4818 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 4819 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 4820 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 4821 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 4822 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 4823 READ_STAT(sc, stats, 4824 xoffPauseFramesReceived.bge_addr_lo)); 4825 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 4826 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 4827 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 4828 READ_STAT(sc, stats, 4829 macControlFramesReceived.bge_addr_lo)); 4830 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 4831 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 4832 4833 #undef READ_STAT 4834 4835 #ifdef notdef 4836 ifp->if_collisions += 4837 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 4838 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 4839 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 4840 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 4841 ifp->if_collisions; 4842 #endif 4843 } 4844 4845 /* 4846 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 4847 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 4848 * but when such padded frames employ the bge IP/TCP checksum offload, 4849 * the hardware checksum assist gives incorrect results (possibly 4850 * from incorporating its own padding into the UDP/TCP checksum; who knows). 4851 * If we pad such runts with zeros, the onboard checksum comes out correct. 4852 */ 4853 static inline int 4854 bge_cksum_pad(struct mbuf *pkt) 4855 { 4856 struct mbuf *last = NULL; 4857 int padlen; 4858 4859 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 4860 4861 /* if there's only the packet-header and we can pad there, use it. */ 4862 if (pkt->m_pkthdr.len == pkt->m_len && 4863 M_TRAILINGSPACE(pkt) >= padlen) { 4864 last = pkt; 4865 } else { 4866 /* 4867 * Walk packet chain to find last mbuf. We will either 4868 * pad there, or append a new mbuf and pad it 4869 * (thus perhaps avoiding the bcm5700 dma-min bug). 4870 */ 4871 for (last = pkt; last->m_next != NULL; last = last->m_next) { 4872 continue; /* do nothing */ 4873 } 4874 4875 /* `last' now points to last in chain. */ 4876 if (M_TRAILINGSPACE(last) < padlen) { 4877 /* Allocate new empty mbuf, pad it. Compact later. 
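			 * (An mbuf fresh from MGET has MLEN bytes of
			 * storage, comfortably more than the at-most
			 * ETHER_MIN_NOPAD bytes of padding ever needed.)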
			 */
			struct mbuf *n;
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	KDASSERT(!M_READONLY(last));
	KDASSERT(M_TRAILINGSPACE(last) >= padlen);

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, char *) + last->m_len, 0, padlen);
	last->m_len += padlen;
	pkt->m_pkthdr.len += padlen;
	return 0;
}

/*
 * Compact outbound packets to avoid a bug with DMA segments of less
 * than 8 bytes.
 */
static inline int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf *m, *prev;
	int totlen;

	prev = NULL;
	totlen = 0;

	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;

		totlen += mlen;
		if (mlen == 0)
			continue;
		if (mlen >= 8)
			continue;

		/*
		 * If we get here, the mbuf data is too small for the DMA
		 * engine. Try to fix it by shuffling data to prev or next
		 * in the chain. If that fails, do a compacting deep-copy
		 * of the whole chain.
		 */

		/* Internal frag. If it fits in prev, copy it there. */
		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
			memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		} else if (m->m_next != NULL &&
		    M_TRAILINGSPACE(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/*
			 * m is writable and next has enough data;
			 * pull up.
			 */
			memcpy(m->m_data + m->m_len, m->m_next->m_data,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		} else if (m->m_next == NULL || 1) {
			/*
			 * Got a runt at the very end of the packet.
			 * Borrow data from the tail of the preceding mbuf
			 * and update its length in-place. (The original
			 * data is still valid, so we can do this even if
			 * prev is not writable.)
			 */

			/*
			 * If we'd make prev a runt, just move all of its
			 * data.
			 */
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);

			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

#ifdef notyet	/* just do the safe slow thing for now */
			if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shortfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					    m->m_pktdat : m->m_dat;
					memmove(m_dat, mtod(m, void*),
					    m->m_len);
					m->m_data = m_dat;
				}
			} else
#endif	/* just do the safe slow thing */
			{
				struct mbuf *n = NULL;
				int newprevlen = prev->m_len - shortfall;

				MGET(n, M_NOWAIT, MT_DATA);
				if (n == NULL)
					return ENOBUFS;
				KASSERT(m->m_len + shortfall < MLEN
				    /*,
				    ("runt %d +prev %d too big\n",
				    m->m_len, shortfall)*/);

				/* First copy the data we're stealing from prev. */
				memcpy(n->m_data, prev->m_data + newprevlen,
				    shortfall);

				/* Update prev->m_len accordingly. */
				prev->m_len -= shortfall;

				/* Copy the data from the runt m. */
				memcpy(n->m_data + shortfall, m->m_data,
				    m->m_len);

				/* n holds what we stole from prev, plus m. */
				n->m_len = shortfall + m->m_len;

				/* Stitch n into the chain and free m. */
				n->m_next = m->m_next;
				prev->m_next = n;
				/* KASSERT(m->m_next == NULL); */
				m->m_next = NULL;
				m_free(m);
				m = n;	/* for continuing loop */
			}
		}
	}
	return 0;
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct bge_tx_bd *f, *prev_f;
	uint32_t frag, cur;
	uint16_t csum_flags = 0;
	uint16_t txbd_tso_flags = 0;
	struct txdmamap_pool_entry *dma;
	bus_dmamap_t dmamap;
	bus_dma_tag_t dmatag;
	int i = 0;
	int use_tso, maxsegsize, error;
	bool have_vtag;
	uint16_t vtag;
	bool remap;

	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
	}

	/*
	 * If we were asked to do an outboard checksum, and the NIC
	 * has the bug where it sometimes adds in the Ethernet padding,
	 * explicitly pad with zeros so the cksum will be correct either way.
	 * (For now, do this for all chip versions, until newer
	 * ones are confirmed not to require the workaround.)
	 */
	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
#ifdef notyet
	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
#endif
	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
		goto check_dma_bug;

	if (bge_cksum_pad(m_head) != 0)
		return ENOBUFS;

check_dma_bug:
	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
		goto doit;

	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes. If we encounter a teeny mbuf
	 * at the end of a chain, we can pad. Otherwise, copy.
	 */
	if (bge_compact_dma_runt(m_head) != 0)
		return ENOBUFS;

doit:
	dma = SLIST_FIRST(&sc->txdma_list);
	if (dma == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return ENOBUFS;
	}
	dmamap = dma->dmamap;
	dmatag = sc->bge_dmatag;
	dma->is_dma32 = false;

	/*
	 * Set up any necessary TSO state before we start packing...
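	 * For TSO we locate the IP/TCP headers (they must sit in the first
	 * mbuf), zero th_sum on 575X-and-newer parts (older ones want a
	 * pseudo-header sum instead), and fold the header/option length
	 * into the descriptor flags in an ASIC-specific encoding; see below.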
5080 */ 5081 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 5082 if (!use_tso) { 5083 maxsegsize = 0; 5084 } else { /* TSO setup */ 5085 unsigned mss; 5086 struct ether_header *eh; 5087 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 5088 unsigned bge_hlen; 5089 struct mbuf * m0 = m_head; 5090 struct ip *ip; 5091 struct tcphdr *th; 5092 int iphl, hlen; 5093 5094 /* 5095 * XXX It would be nice if the mbuf pkthdr had offset 5096 * fields for the protocol headers. 5097 */ 5098 5099 eh = mtod(m0, struct ether_header *); 5100 switch (htons(eh->ether_type)) { 5101 case ETHERTYPE_IP: 5102 offset = ETHER_HDR_LEN; 5103 break; 5104 5105 case ETHERTYPE_VLAN: 5106 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 5107 break; 5108 5109 default: 5110 /* 5111 * Don't support this protocol or encapsulation. 5112 */ 5113 return ENOBUFS; 5114 } 5115 5116 /* 5117 * TCP/IP headers are in the first mbuf; we can do 5118 * this the easy way. 5119 */ 5120 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 5121 hlen = iphl + offset; 5122 if (__predict_false(m0->m_len < 5123 (hlen + sizeof(struct tcphdr)))) { 5124 5125 aprint_error_dev(sc->bge_dev, 5126 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 5127 "not handled yet\n", 5128 m0->m_len, hlen+ sizeof(struct tcphdr)); 5129 #ifdef NOTYET 5130 /* 5131 * XXX jonathan@NetBSD.org: untested. 5132 * how to force this branch to be taken? 5133 */ 5134 BGE_EVCNT_INCR(sc->bge_ev_txtsopain); 5135 5136 m_copydata(m0, offset, sizeof(ip), &ip); 5137 m_copydata(m0, hlen, sizeof(th), &th); 5138 5139 ip.ip_len = 0; 5140 5141 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 5142 sizeof(ip.ip_len), &ip.ip_len); 5143 5144 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 5145 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 5146 5147 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 5148 sizeof(th.th_sum), &th.th_sum); 5149 5150 hlen += th.th_off << 2; 5151 iptcp_opt_words = hlen; 5152 #else 5153 /* 5154 * if_wm "hard" case not yet supported, can we not 5155 * mandate it out of existence? 5156 */ 5157 (void) ip; (void)th; (void) ip_tcp_hlen; 5158 5159 return ENOBUFS; 5160 #endif 5161 } else { 5162 ip = (struct ip *) (mtod(m0, char *) + offset); 5163 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 5164 ip_tcp_hlen = iphl + (th->th_off << 2); 5165 5166 /* Total IP/TCP options, in 32-bit words */ 5167 iptcp_opt_words = (ip_tcp_hlen 5168 - sizeof(struct tcphdr) 5169 - sizeof(struct ip)) >> 2; 5170 } 5171 if (BGE_IS_575X_PLUS(sc)) { 5172 th->th_sum = 0; 5173 csum_flags = 0; 5174 } else { 5175 /* 5176 * XXX jonathan@NetBSD.org: 5705 untested. 5177 * Requires TSO firmware patch for 5701/5703/5704. 5178 */ 5179 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 5180 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 5181 } 5182 5183 mss = m_head->m_pkthdr.segsz; 5184 txbd_tso_flags |= 5185 BGE_TXBDFLAG_CPU_PRE_DMA | 5186 BGE_TXBDFLAG_CPU_POST_DMA; 5187 5188 /* 5189 * Our NIC TSO-assist assumes TSO has standard, optionless 5190 * IPv4 and TCP headers, which total 40 bytes. By default, 5191 * the NIC copies 40 bytes of IP/TCP header from the 5192 * supplied header into the IP/TCP header portion of 5193 * each post-TSO-segment. If the supplied packet has IP or 5194 * TCP options, we need to tell the NIC to copy those extra 5195 * bytes into each post-TSO header, in addition to the normal 5196 * 40-byte IP/TCP header (and to leave space accordingly). 5197 * Unfortunately, the driver encoding of option length 5198 * varies across different ASIC families. 
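		 * For example, with optionless headers ip_tcp_hlen is 40 and
		 * bge_hlen is 10: 5717-class parts then carry
		 * ((10 & 0xF8) << 7) | ((10 & 0x4) << 2) in the TX-BD flags
		 * plus (10 & 0x3) << 14 in the segment field, while
		 * 5705-class parts simply use 10 << 11.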
5199 */ 5200 tcp_seg_flags = 0; 5201 bge_hlen = ip_tcp_hlen >> 2; 5202 if (BGE_IS_5717_PLUS(sc)) { 5203 tcp_seg_flags = (bge_hlen & 0x3) << 14; 5204 txbd_tso_flags |= 5205 ((bge_hlen & 0xF8) << 7) | ((bge_hlen & 0x4) << 2); 5206 } else if (BGE_IS_5705_PLUS(sc)) { 5207 tcp_seg_flags = bge_hlen << 11; 5208 } else { 5209 /* XXX iptcp_opt_words or bge_hlen ? */ 5210 txbd_tso_flags |= iptcp_opt_words << 12; 5211 } 5212 maxsegsize = mss | tcp_seg_flags; 5213 ip->ip_len = htons(mss + ip_tcp_hlen); 5214 ip->ip_sum = 0; 5215 5216 } /* TSO setup */ 5217 5218 have_vtag = vlan_has_tag(m_head); 5219 if (have_vtag) 5220 vtag = vlan_get_tag(m_head); 5221 5222 /* 5223 * Start packing the mbufs in this chain into 5224 * the fragment pointers. Stop when we run out 5225 * of fragments or hit the end of the mbuf chain. 5226 */ 5227 remap = true; 5228 load_again: 5229 error = bus_dmamap_load_mbuf(dmatag, dmamap, m_head, BUS_DMA_NOWAIT); 5230 if (__predict_false(error)) { 5231 if (error == EFBIG && remap) { 5232 struct mbuf *m; 5233 remap = false; 5234 m = m_defrag(m_head, M_NOWAIT); 5235 if (m != NULL) { 5236 KASSERT(m == m_head); 5237 goto load_again; 5238 } 5239 } 5240 return error; 5241 } 5242 /* 5243 * Sanity check: avoid coming within 16 descriptors 5244 * of the end of the ring. 5245 */ 5246 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 5247 BGE_TSO_PRINTF(("%s: " 5248 " dmamap_load_mbuf too close to ring wrap\n", 5249 device_xname(sc->bge_dev))); 5250 goto fail_unload; 5251 } 5252 5253 /* Iterate over dmap-map fragments. */ 5254 f = prev_f = NULL; 5255 cur = frag = *txidx; 5256 5257 for (i = 0; i < dmamap->dm_nsegs; i++) { 5258 f = &sc->bge_rdata->bge_tx_ring[frag]; 5259 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 5260 break; 5261 5262 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); 5263 f->bge_len = dmamap->dm_segs[i].ds_len; 5264 if (sizeof(bus_addr_t) > 4 && dma->is_dma32 == false && use_tso && ( 5265 (dmamap->dm_segs[i].ds_addr & 0xffffffff00000000) != 5266 ((dmamap->dm_segs[i].ds_addr + f->bge_len) & 0xffffffff00000000) || 5267 (prev_f != NULL && 5268 prev_f->bge_addr.bge_addr_hi != f->bge_addr.bge_addr_hi)) 5269 ) { 5270 /* 5271 * watchdog timeout issue was observed with TSO, 5272 * limiting DMA address space to 32bits seems to 5273 * address the issue. 5274 */ 5275 bus_dmamap_unload(dmatag, dmamap); 5276 dmatag = sc->bge_dmatag32; 5277 dmamap = dma->dmamap32; 5278 dma->is_dma32 = true; 5279 remap = true; 5280 goto load_again; 5281 } 5282 5283 /* 5284 * For 5751 and follow-ons, for TSO we must turn 5285 * off checksum-assist flag in the tx-descr, and 5286 * supply the ASIC-revision-specific encoding 5287 * of TSO flags and segsize. 
5288 */ 5289 if (use_tso) { 5290 if (BGE_IS_575X_PLUS(sc) || i == 0) { 5291 f->bge_rsvd = maxsegsize; 5292 f->bge_flags = csum_flags | txbd_tso_flags; 5293 } else { 5294 f->bge_rsvd = 0; 5295 f->bge_flags = 5296 (csum_flags | txbd_tso_flags) & 0x0fff; 5297 } 5298 } else { 5299 f->bge_rsvd = 0; 5300 f->bge_flags = csum_flags; 5301 } 5302 5303 if (have_vtag) { 5304 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 5305 f->bge_vlan_tag = vtag; 5306 } else { 5307 f->bge_vlan_tag = 0; 5308 } 5309 prev_f = f; 5310 cur = frag; 5311 BGE_INC(frag, BGE_TX_RING_CNT); 5312 } 5313 5314 if (i < dmamap->dm_nsegs) { 5315 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 5316 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 5317 goto fail_unload; 5318 } 5319 5320 bus_dmamap_sync(dmatag, dmamap, 0, dmamap->dm_mapsize, 5321 BUS_DMASYNC_PREWRITE); 5322 5323 if (frag == sc->bge_tx_saved_considx) { 5324 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 5325 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 5326 5327 goto fail_unload; 5328 } 5329 5330 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 5331 sc->bge_cdata.bge_tx_chain[cur] = m_head; 5332 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 5333 sc->txdma[cur] = dma; 5334 sc->bge_txcnt += dmamap->dm_nsegs; 5335 5336 *txidx = frag; 5337 5338 return 0; 5339 5340 fail_unload: 5341 bus_dmamap_unload(dmatag, dmamap); 5342 ifp->if_flags |= IFF_OACTIVE; 5343 5344 return ENOBUFS; 5345 } 5346 5347 /* 5348 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 5349 * to the mbuf data regions directly in the transmit descriptors. 5350 */ 5351 static void 5352 bge_start(struct ifnet *ifp) 5353 { 5354 struct bge_softc *sc; 5355 struct mbuf *m_head = NULL; 5356 struct mbuf *m; 5357 uint32_t prodidx; 5358 int pkts = 0; 5359 int error; 5360 5361 sc = ifp->if_softc; 5362 5363 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 5364 return; 5365 5366 prodidx = sc->bge_tx_prodidx; 5367 5368 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 5369 IFQ_POLL(&ifp->if_snd, m_head); 5370 if (m_head == NULL) 5371 break; 5372 5373 #if 0 5374 /* 5375 * XXX 5376 * safety overkill. If this is a fragmented packet chain 5377 * with delayed TCP/UDP checksums, then only encapsulate 5378 * it if we have enough descriptors to handle the entire 5379 * chain at once. 5380 * (paranoia -- may not actually be needed) 5381 */ 5382 if (m_head->m_flags & M_FIRSTFRAG && 5383 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 5384 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 5385 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 5386 ifp->if_flags |= IFF_OACTIVE; 5387 break; 5388 } 5389 } 5390 #endif 5391 5392 /* 5393 * Pack the data into the transmit ring. If we 5394 * don't have room, set the OACTIVE flag and wait 5395 * for the NIC to drain the ring. 5396 */ 5397 error = bge_encap(sc, m_head, &prodidx); 5398 if (__predict_false(error)) { 5399 if (ifp->if_flags & IFF_OACTIVE) { 5400 /* just wait for the transmit ring to drain */ 5401 break; 5402 } 5403 IFQ_DEQUEUE(&ifp->if_snd, m); 5404 KASSERT(m == m_head); 5405 m_freem(m_head); 5406 continue; 5407 } 5408 5409 /* now we are committed to transmit the packet */ 5410 IFQ_DEQUEUE(&ifp->if_snd, m); 5411 KASSERT(m == m_head); 5412 pkts++; 5413 5414 /* 5415 * If there's a BPF listener, bounce a copy of this frame 5416 * to him. 
5417 */ 5418 bpf_mtap(ifp, m_head, BPF_D_OUT); 5419 } 5420 if (pkts == 0) 5421 return; 5422 5423 /* Transmit */ 5424 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5425 /* 5700 b2 errata */ 5426 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 5427 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5428 5429 sc->bge_tx_prodidx = prodidx; 5430 5431 /* 5432 * Set a timeout in case the chip goes out to lunch. 5433 */ 5434 ifp->if_timer = 5; 5435 } 5436 5437 static int 5438 bge_init(struct ifnet *ifp) 5439 { 5440 struct bge_softc *sc = ifp->if_softc; 5441 const uint16_t *m; 5442 uint32_t mode, reg; 5443 int s, error = 0; 5444 5445 s = splnet(); 5446 5447 ifp = &sc->ethercom.ec_if; 5448 5449 /* Cancel pending I/O and flush buffers. */ 5450 bge_stop(ifp, 0); 5451 5452 bge_stop_fw(sc); 5453 bge_sig_pre_reset(sc, BGE_RESET_START); 5454 bge_reset(sc); 5455 bge_sig_legacy(sc, BGE_RESET_START); 5456 5457 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 5458 reg = CSR_READ_4(sc, BGE_CPMU_CTRL); 5459 reg &= ~(BGE_CPMU_CTRL_LINK_AWARE_MODE | 5460 BGE_CPMU_CTRL_LINK_IDLE_MODE); 5461 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg); 5462 5463 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK); 5464 reg &= ~BGE_CPMU_LSPD_10MB_CLK; 5465 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25; 5466 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg); 5467 5468 reg = CSR_READ_4(sc, BGE_CPMU_LNK_AWARE_PWRMD); 5469 reg &= ~BGE_CPMU_LNK_AWARE_MACCLK_MASK; 5470 reg |= BGE_CPMU_LNK_AWARE_MACCLK_6_25; 5471 CSR_WRITE_4(sc, BGE_CPMU_LNK_AWARE_PWRMD, reg); 5472 5473 reg = CSR_READ_4(sc, BGE_CPMU_HST_ACC); 5474 reg &= ~BGE_CPMU_HST_ACC_MACCLK_MASK; 5475 reg |= BGE_CPMU_HST_ACC_MACCLK_6_25; 5476 CSR_WRITE_4(sc, BGE_CPMU_HST_ACC, reg); 5477 } 5478 5479 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) { 5480 pcireg_t aercap; 5481 5482 reg = CSR_READ_4(sc, BGE_PCIE_PWRMNG_THRESH); 5483 reg = (reg & ~BGE_PCIE_PWRMNG_L1THRESH_MASK) 5484 | BGE_PCIE_PWRMNG_L1THRESH_4MS 5485 | BGE_PCIE_PWRMNG_EXTASPMTMR_EN; 5486 CSR_WRITE_4(sc, BGE_PCIE_PWRMNG_THRESH, reg); 5487 5488 reg = CSR_READ_4(sc, BGE_PCIE_EIDLE_DELAY); 5489 reg = (reg & ~BGE_PCIE_EIDLE_DELAY_MASK) 5490 | BGE_PCIE_EIDLE_DELAY_13CLK; 5491 CSR_WRITE_4(sc, BGE_PCIE_EIDLE_DELAY, reg); 5492 5493 /* Clear correctable error */ 5494 if (pci_get_ext_capability(sc->sc_pc, sc->sc_pcitag, 5495 PCI_EXTCAP_AER, &aercap, NULL) != 0) 5496 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 5497 aercap + PCI_AER_COR_STATUS, 0xffffffff); 5498 5499 reg = CSR_READ_4(sc, BGE_PCIE_LINKCTL); 5500 reg = (reg & ~BGE_PCIE_LINKCTL_L1_PLL_PDEN) 5501 | BGE_PCIE_LINKCTL_L1_PLL_PDDIS; 5502 CSR_WRITE_4(sc, BGE_PCIE_LINKCTL, reg); 5503 } 5504 5505 bge_sig_post_reset(sc, BGE_RESET_START); 5506 5507 bge_chipinit(sc); 5508 5509 /* 5510 * Init the various state machines, ring 5511 * control blocks and firmware. 5512 */ 5513 error = bge_blockinit(sc); 5514 if (error != 0) { 5515 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 5516 error); 5517 splx(s); 5518 return error; 5519 } 5520 5521 ifp = &sc->ethercom.ec_if; 5522 5523 /* 5718 step 25, 57XX step 54 */ 5524 /* Specify MTU. */ 5525 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 5526 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 5527 5528 /* 5718 step 23 */ 5529 /* Load our MAC address. */ 5530 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 5531 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 5532 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 5533 5534 /* Enable or disable promiscuous mode as needed. 
*/
5535 if (ifp->if_flags & IFF_PROMISC)
5536 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5537 else
5538 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
5539
5540 /* Program multicast filter. */
5541 bge_setmulti(sc);
5542
5543 /* Init RX ring. */
5544 bge_init_rx_ring_std(sc);
5545
5546 /*
5547 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5548 * memory to ensure that the chip has in fact read the first
5549 * entry of the ring.
5550 */
5551 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5552 uint32_t v, i;
5553 for (i = 0; i < 10; i++) {
5554 DELAY(20);
5555 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5556 if (v == (MCLBYTES - ETHER_ALIGN))
5557 break;
5558 }
5559 if (i == 10)
5560 aprint_error_dev(sc->bge_dev,
5561 "5705 A0 chip failed to load RX ring\n");
5562 }
5563
5564 /* Init jumbo RX ring. */
5565 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
5566 bge_init_rx_ring_jumbo(sc);
5567
5568 /* Init our RX return ring index */
5569 sc->bge_rx_saved_considx = 0;
5570
5571 /* Init TX ring. */
5572 bge_init_tx_ring(sc);
5573
5574 /* 5718 step 63, 57XX step 94 */
5575 /* Enable TX MAC state machine lockup fix. */
5576 mode = CSR_READ_4(sc, BGE_TX_MODE);
5577 if (BGE_IS_5755_PLUS(sc) ||
5578 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
5579 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5580 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
5581 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
5582 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5583 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5584 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5585 }
5586
5587 /* Turn on transmitter */
5588 CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5589 /* 5718 step 64 */
5590 DELAY(100);
5591
5592 /* 5718 step 65, 57XX step 95 */
5593 /* Turn on receiver */
5594 mode = CSR_READ_4(sc, BGE_RX_MODE);
5595 if (BGE_IS_5755_PLUS(sc))
5596 mode |= BGE_RXMODE_IPV6_ENABLE;
5597 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
5598 mode |= BGE_RXMODE_IPV4_FRAG_FIX;
5599 CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
5600 /* 5718 step 66 */
5601 DELAY(10);
5602
5603 /* 5718 step 12, 57XX step 37 */
5604 /*
5605 * XXX Documents for the 5718 series and 577xx say the recommended
5606 * value is 1, but tg3 sets it to 1 only on the 57765 series.
5607 */
5608 if (BGE_IS_57765_PLUS(sc))
5609 reg = 1;
5610 else
5611 reg = 2;
5612 CSR_WRITE_4_FLUSH(sc, BGE_MAX_RX_FRAME_LOWAT, reg);
5613
5614 /* Tell firmware we're alive. */
5615 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5616
5617 /* Enable host interrupts. */
5618 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5619 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5620 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
5621
5622 if ((error = bge_ifmedia_upd(ifp)) != 0)
5623 goto out;
5624
5625 ifp->if_flags |= IFF_RUNNING;
5626 ifp->if_flags &= ~IFF_OACTIVE;
5627
5628 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
5629
5630 out:
5631 sc->bge_if_flags = ifp->if_flags;
5632 splx(s);
5633
5634 return error;
5635 }
5636
5637 /*
5638 * Set media options.
5639 */
5640 static int
5641 bge_ifmedia_upd(struct ifnet *ifp)
5642 {
5643 struct bge_softc *sc = ifp->if_softc;
5644 struct mii_data *mii = &sc->bge_mii;
5645 struct ifmedia *ifm = &sc->bge_ifmedia;
5646 int rc;
5647
5648 /* If this is a 1000BASE-X NIC, enable the TBI port.
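 * A TBI (ten-bit interface) NIC drives the 1000BASE-X SERDES
 * directly, with no copper PHY, so media changes are programmed into
 * MAC registers here instead of going through the MII layer.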
*/
5649 if (sc->bge_flags & BGEF_FIBER_TBI) {
5650 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5651 return EINVAL;
5652 switch (IFM_SUBTYPE(ifm->ifm_media)) {
5653 case IFM_AUTO:
5654 /*
5655 * The BCM5704 ASIC appears to have a special
5656 * mechanism for programming the autoneg
5657 * advertisement registers in TBI mode.
5658 */
5659 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
5660 uint32_t sgdig;
5661 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5662 if (sgdig & BGE_SGDIGSTS_DONE) {
5663 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5664 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5665 sgdig |= BGE_SGDIGCFG_AUTO |
5666 BGE_SGDIGCFG_PAUSE_CAP |
5667 BGE_SGDIGCFG_ASYM_PAUSE;
5668 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
5669 sgdig | BGE_SGDIGCFG_SEND);
5670 DELAY(5);
5671 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG,
5672 sgdig);
5673 }
5674 }
5675 break;
5676 case IFM_1000_SX:
5677 if ((ifm->ifm_media & IFM_FDX) != 0) {
5678 BGE_CLRBIT(sc, BGE_MAC_MODE,
5679 BGE_MACMODE_HALF_DUPLEX);
5680 } else {
5681 BGE_SETBIT(sc, BGE_MAC_MODE,
5682 BGE_MACMODE_HALF_DUPLEX);
5683 }
5684 DELAY(40);
5685 break;
5686 default:
5687 return EINVAL;
5688 }
5689 /* XXX 802.3x flow control for 1000BASE-SX */
5690 return 0;
5691 }
5692
5693 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784) &&
5694 (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5784_AX)) {
5695 uint32_t reg;
5696
5697 reg = CSR_READ_4(sc, BGE_CPMU_CTRL);
5698 if ((reg & BGE_CPMU_CTRL_GPHY_10MB_RXONLY) != 0) {
5699 reg &= ~BGE_CPMU_CTRL_GPHY_10MB_RXONLY;
5700 CSR_WRITE_4(sc, BGE_CPMU_CTRL, reg);
5701 }
5702 }
5703
5704 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
5705 if ((rc = mii_mediachg(mii)) == ENXIO)
5706 return 0;
5707
5708 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) {
5709 uint32_t reg;
5710
5711 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_1000MB_CLK);
5712 if ((reg & BGE_CPMU_LSPD_1000MB_MACCLK_MASK)
5713 == (BGE_CPMU_LSPD_1000MB_MACCLK_12_5)) {
5714 reg &= ~BGE_CPMU_LSPD_1000MB_MACCLK_MASK;
5715 delay(40);
5716 CSR_WRITE_4(sc, BGE_CPMU_LSPD_1000MB_CLK, reg);
5717 }
5718 }
5719
5720 /*
5721 * Force an interrupt so that we will call bge_link_upd
5722 * if needed and clear any pending link state attention.
5723 * Without this we do not get any further interrupts for
5724 * link state changes and thus will not bring the link up
5725 * or be able to send in bge_start; previously the only way
5726 * to get things working was to receive a packet and take an RX interrupt.
5727 */
5728 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
5729 sc->bge_flags & BGEF_IS_5788)
5730 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5731 else
5732 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5733
5734 return rc;
5735 }
5736
5737 /*
5738 * Report current media status.
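 * (For TBI fiber the status is synthesized from MAC registers below;
 * otherwise we simply defer to mii_pollstat().)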
5739 */ 5740 static void 5741 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 5742 { 5743 struct bge_softc *sc = ifp->if_softc; 5744 struct mii_data *mii = &sc->bge_mii; 5745 5746 if (sc->bge_flags & BGEF_FIBER_TBI) { 5747 ifmr->ifm_status = IFM_AVALID; 5748 ifmr->ifm_active = IFM_ETHER; 5749 if (CSR_READ_4(sc, BGE_MAC_STS) & 5750 BGE_MACSTAT_TBI_PCS_SYNCHED) 5751 ifmr->ifm_status |= IFM_ACTIVE; 5752 ifmr->ifm_active |= IFM_1000_SX; 5753 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 5754 ifmr->ifm_active |= IFM_HDX; 5755 else 5756 ifmr->ifm_active |= IFM_FDX; 5757 return; 5758 } 5759 5760 mii_pollstat(mii); 5761 ifmr->ifm_status = mii->mii_media_status; 5762 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 5763 sc->bge_flowflags; 5764 } 5765 5766 static int 5767 bge_ifflags_cb(struct ethercom *ec) 5768 { 5769 struct ifnet *ifp = &ec->ec_if; 5770 struct bge_softc *sc = ifp->if_softc; 5771 int change = ifp->if_flags ^ sc->bge_if_flags; 5772 5773 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 5774 return ENETRESET; 5775 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 5776 return 0; 5777 5778 if ((ifp->if_flags & IFF_PROMISC) == 0) 5779 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5780 else 5781 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5782 5783 bge_setmulti(sc); 5784 5785 sc->bge_if_flags = ifp->if_flags; 5786 return 0; 5787 } 5788 5789 static int 5790 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 5791 { 5792 struct bge_softc *sc = ifp->if_softc; 5793 struct ifreq *ifr = (struct ifreq *) data; 5794 int s, error = 0; 5795 struct mii_data *mii; 5796 5797 s = splnet(); 5798 5799 switch (command) { 5800 case SIOCSIFMEDIA: 5801 /* XXX Flow control is not supported for 1000BASE-SX */ 5802 if (sc->bge_flags & BGEF_FIBER_TBI) { 5803 ifr->ifr_media &= ~IFM_ETH_FMASK; 5804 sc->bge_flowflags = 0; 5805 } 5806 5807 /* Flow control requires full-duplex mode. */ 5808 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 5809 (ifr->ifr_media & IFM_FDX) == 0) { 5810 ifr->ifr_media &= ~IFM_ETH_FMASK; 5811 } 5812 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 5813 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 5814 /* We can do both TXPAUSE and RXPAUSE. */ 5815 ifr->ifr_media |= 5816 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 5817 } 5818 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 5819 } 5820 5821 if (sc->bge_flags & BGEF_FIBER_TBI) { 5822 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 5823 command); 5824 } else { 5825 mii = &sc->bge_mii; 5826 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 5827 command); 5828 } 5829 break; 5830 default: 5831 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 5832 break; 5833 5834 error = 0; 5835 5836 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 5837 ; 5838 else if (ifp->if_flags & IFF_RUNNING) 5839 bge_setmulti(sc); 5840 break; 5841 } 5842 5843 splx(s); 5844 5845 return error; 5846 } 5847 5848 static void 5849 bge_watchdog(struct ifnet *ifp) 5850 { 5851 struct bge_softc *sc; 5852 uint32_t status; 5853 5854 sc = ifp->if_softc; 5855 5856 /* If pause frames are active then don't reset the hardware. */ 5857 if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { 5858 status = CSR_READ_4(sc, BGE_RX_STS); 5859 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { 5860 /* 5861 * If link partner has us in XOFF state then wait for 5862 * the condition to clear. 
5863 */ 5864 CSR_WRITE_4(sc, BGE_RX_STS, status); 5865 ifp->if_timer = 5; 5866 return; 5867 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && 5868 (status & BGE_RXSTAT_RCVD_XON) != 0) { 5869 /* 5870 * If link partner has us in XOFF state then wait for 5871 * the condition to clear. 5872 */ 5873 CSR_WRITE_4(sc, BGE_RX_STS, status); 5874 ifp->if_timer = 5; 5875 return; 5876 } 5877 /* 5878 * Any other condition is unexpected and the controller 5879 * should be reset. 5880 */ 5881 } 5882 5883 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n"); 5884 5885 ifp->if_flags &= ~IFF_RUNNING; 5886 bge_init(ifp); 5887 5888 ifp->if_oerrors++; 5889 } 5890 5891 static void 5892 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 5893 { 5894 int i; 5895 5896 BGE_CLRBIT_FLUSH(sc, reg, bit); 5897 5898 for (i = 0; i < 1000; i++) { 5899 delay(100); 5900 if ((CSR_READ_4(sc, reg) & bit) == 0) 5901 return; 5902 } 5903 5904 /* 5905 * Doesn't print only when the register is BGE_SRS_MODE. It occurs 5906 * on some environment (and once after boot?) 5907 */ 5908 if (reg != BGE_SRS_MODE) 5909 aprint_error_dev(sc->bge_dev, 5910 "block failed to stop: reg 0x%lx, bit 0x%08x\n", 5911 (u_long)reg, bit); 5912 } 5913 5914 /* 5915 * Stop the adapter and free any mbufs allocated to the 5916 * RX and TX lists. 5917 */ 5918 static void 5919 bge_stop(struct ifnet *ifp, int disable) 5920 { 5921 struct bge_softc *sc = ifp->if_softc; 5922 5923 if (disable) { 5924 sc->bge_detaching = 1; 5925 callout_halt(&sc->bge_timeout, NULL); 5926 } else 5927 callout_stop(&sc->bge_timeout); 5928 5929 /* Disable host interrupts. */ 5930 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 5931 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1); 5932 5933 /* 5934 * Tell firmware we're shutting down. 5935 */ 5936 bge_stop_fw(sc); 5937 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); 5938 5939 /* 5940 * Disable all of the receiver blocks. 5941 */ 5942 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 5943 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 5944 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 5945 if (BGE_IS_5700_FAMILY(sc)) 5946 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 5947 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 5948 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 5949 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 5950 5951 /* 5952 * Disable all of the transmit blocks. 5953 */ 5954 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 5955 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 5956 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 5957 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 5958 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 5959 if (BGE_IS_5700_FAMILY(sc)) 5960 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 5961 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 5962 5963 BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB); 5964 delay(40); 5965 5966 bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 5967 5968 /* 5969 * Shut down all of the memory managers and related 5970 * state machines. 
5971 */ 5972 /* 5718 step 5a,5b */ 5973 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 5974 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 5975 if (BGE_IS_5700_FAMILY(sc)) 5976 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 5977 5978 /* 5718 step 5c,5d */ 5979 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 5980 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 5981 5982 if (BGE_IS_5700_FAMILY(sc)) { 5983 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 5984 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 5985 } 5986 5987 bge_reset(sc); 5988 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); 5989 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); 5990 5991 /* 5992 * Keep the ASF firmware running if up. 5993 */ 5994 if (sc->bge_asf_mode & ASF_STACKUP) 5995 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 5996 else 5997 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 5998 5999 /* Free the RX lists. */ 6000 bge_free_rx_ring_std(sc, disable); 6001 6002 /* Free jumbo RX list. */ 6003 if (BGE_IS_JUMBO_CAPABLE(sc)) 6004 bge_free_rx_ring_jumbo(sc); 6005 6006 /* Free TX buffers. */ 6007 bge_free_tx_ring(sc, disable); 6008 6009 /* 6010 * Isolate/power down the PHY. 6011 */ 6012 if (!(sc->bge_flags & BGEF_FIBER_TBI)) 6013 mii_down(&sc->bge_mii); 6014 6015 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 6016 6017 /* Clear MAC's link state (PHY may still have link UP). */ 6018 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6019 6020 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 6021 } 6022 6023 static void 6024 bge_link_upd(struct bge_softc *sc) 6025 { 6026 struct ifnet *ifp = &sc->ethercom.ec_if; 6027 struct mii_data *mii = &sc->bge_mii; 6028 uint32_t status; 6029 uint16_t phyval; 6030 int link; 6031 6032 /* Clear 'pending link event' flag */ 6033 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 6034 6035 /* 6036 * Process link state changes. 6037 * Grrr. The link status word in the status block does 6038 * not work correctly on the BCM5700 rev AX and BX chips, 6039 * according to all available information. Hence, we have 6040 * to enable MII interrupts in order to properly obtain 6041 * async link changes. Unfortunately, this also means that 6042 * we have to read the MAC status register to detect link 6043 * changes, thereby adding an additional register access to 6044 * the interrupt handler. 
6045 */ 6046 6047 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 6048 status = CSR_READ_4(sc, BGE_MAC_STS); 6049 if (status & BGE_MACSTAT_MI_INTERRUPT) { 6050 mii_pollstat(mii); 6051 6052 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6053 mii->mii_media_status & IFM_ACTIVE && 6054 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6055 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6056 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6057 (!(mii->mii_media_status & IFM_ACTIVE) || 6058 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6059 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6060 6061 /* Clear the interrupt */ 6062 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 6063 BGE_EVTENB_MI_INTERRUPT); 6064 bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, 6065 BRGPHY_MII_ISR, &phyval); 6066 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, 6067 BRGPHY_MII_IMR, BRGPHY_INTRS); 6068 } 6069 return; 6070 } 6071 6072 if (sc->bge_flags & BGEF_FIBER_TBI) { 6073 status = CSR_READ_4(sc, BGE_MAC_STS); 6074 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 6075 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 6076 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6077 if (BGE_ASICREV(sc->bge_chipid) 6078 == BGE_ASICREV_BCM5704) { 6079 BGE_CLRBIT(sc, BGE_MAC_MODE, 6080 BGE_MACMODE_TBI_SEND_CFGS); 6081 DELAY(40); 6082 } 6083 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 6084 if_link_state_change(ifp, LINK_STATE_UP); 6085 } 6086 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 6087 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6088 if_link_state_change(ifp, LINK_STATE_DOWN); 6089 } 6090 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 6091 /* 6092 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 6093 * bit in status word always set. Workaround this bug by 6094 * reading PHY link status directly. 6095 */ 6096 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 6097 BGE_STS_LINK : 0; 6098 6099 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 6100 mii_pollstat(mii); 6101 6102 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 6103 mii->mii_media_status & IFM_ACTIVE && 6104 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 6105 BGE_STS_SETBIT(sc, BGE_STS_LINK); 6106 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 6107 (!(mii->mii_media_status & IFM_ACTIVE) || 6108 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 6109 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 6110 } 6111 } else { 6112 /* 6113 * For controllers that call mii_tick, we have to poll 6114 * link status. 
6115 */ 6116 mii_pollstat(mii); 6117 } 6118 6119 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5784_AX) { 6120 uint32_t reg, scale; 6121 6122 reg = CSR_READ_4(sc, BGE_CPMU_CLCK_STAT) & 6123 BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK; 6124 if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5) 6125 scale = 65; 6126 else if (reg == BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25) 6127 scale = 6; 6128 else 6129 scale = 12; 6130 6131 reg = CSR_READ_4(sc, BGE_MISC_CFG) & 6132 ~BGE_MISCCFG_TIMER_PRESCALER; 6133 reg |= scale << 1; 6134 CSR_WRITE_4(sc, BGE_MISC_CFG, reg); 6135 } 6136 /* Clear the attention */ 6137 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 6138 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 6139 BGE_MACSTAT_LINK_CHANGED); 6140 } 6141 6142 static int 6143 bge_sysctl_verify(SYSCTLFN_ARGS) 6144 { 6145 int error, t; 6146 struct sysctlnode node; 6147 6148 node = *rnode; 6149 t = *(int*)rnode->sysctl_data; 6150 node.sysctl_data = &t; 6151 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 6152 if (error || newp == NULL) 6153 return error; 6154 6155 #if 0 6156 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 6157 node.sysctl_num, rnode->sysctl_num)); 6158 #endif 6159 6160 if (node.sysctl_num == bge_rxthresh_nodenum) { 6161 if (t < 0 || t >= NBGE_RX_THRESH) 6162 return EINVAL; 6163 bge_update_all_threshes(t); 6164 } else 6165 return EINVAL; 6166 6167 *(int*)rnode->sysctl_data = t; 6168 6169 return 0; 6170 } 6171 6172 /* 6173 * Set up sysctl(3) MIB, hw.bge.*. 6174 */ 6175 static void 6176 bge_sysctl_init(struct bge_softc *sc) 6177 { 6178 int rc, bge_root_num; 6179 const struct sysctlnode *node; 6180 6181 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 6182 0, CTLTYPE_NODE, "bge", 6183 SYSCTL_DESCR("BGE interface controls"), 6184 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 6185 goto out; 6186 } 6187 6188 bge_root_num = node->sysctl_num; 6189 6190 /* BGE Rx interrupt mitigation level */ 6191 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 6192 CTLFLAG_READWRITE, 6193 CTLTYPE_INT, "rx_lvl", 6194 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 6195 bge_sysctl_verify, 0, 6196 &bge_rx_thresh_lvl, 6197 0, CTL_HW, bge_root_num, CTL_CREATE, 6198 CTL_EOL)) != 0) { 6199 goto out; 6200 } 6201 6202 bge_rxthresh_nodenum = node->sysctl_num; 6203 6204 return; 6205 6206 out: 6207 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 6208 } 6209 6210 #ifdef BGE_DEBUG 6211 void 6212 bge_debug_info(struct bge_softc *sc) 6213 { 6214 6215 printf("Hardware Flags:\n"); 6216 if (BGE_IS_57765_PLUS(sc)) 6217 printf(" - 57765 Plus\n"); 6218 if (BGE_IS_5717_PLUS(sc)) 6219 printf(" - 5717 Plus\n"); 6220 if (BGE_IS_5755_PLUS(sc)) 6221 printf(" - 5755 Plus\n"); 6222 if (BGE_IS_575X_PLUS(sc)) 6223 printf(" - 575X Plus\n"); 6224 if (BGE_IS_5705_PLUS(sc)) 6225 printf(" - 5705 Plus\n"); 6226 if (BGE_IS_5714_FAMILY(sc)) 6227 printf(" - 5714 Family\n"); 6228 if (BGE_IS_5700_FAMILY(sc)) 6229 printf(" - 5700 Family\n"); 6230 if (sc->bge_flags & BGEF_IS_5788) 6231 printf(" - 5788\n"); 6232 if (sc->bge_flags & BGEF_JUMBO_CAPABLE) 6233 printf(" - Supports Jumbo Frames\n"); 6234 if (sc->bge_flags & BGEF_NO_EEPROM) 6235 printf(" - No EEPROM\n"); 6236 if (sc->bge_flags & BGEF_PCIX) 6237 printf(" - PCI-X Bus\n"); 6238 if (sc->bge_flags & BGEF_PCIE) 6239 printf(" - PCI Express Bus\n"); 6240 if (sc->bge_flags & BGEF_RX_ALIGNBUG) 6241 printf(" - RX Alignment Bug\n"); 6242 if (sc->bge_flags & BGEF_APE) 6243 printf(" - APE\n"); 6244 if (sc->bge_flags & BGEF_CPMU_PRESENT) 6245 printf(" - 
CPMU\n"); 6246 if (sc->bge_flags & BGEF_TSO) 6247 printf(" - TSO\n"); 6248 if (sc->bge_flags & BGEF_TAGGED_STATUS) 6249 printf(" - TAGGED_STATUS\n"); 6250 6251 /* PHY related */ 6252 if (sc->bge_phy_flags & BGEPHYF_NO_3LED) 6253 printf(" - No 3 LEDs\n"); 6254 if (sc->bge_phy_flags & BGEPHYF_CRC_BUG) 6255 printf(" - CRC bug\n"); 6256 if (sc->bge_phy_flags & BGEPHYF_ADC_BUG) 6257 printf(" - ADC bug\n"); 6258 if (sc->bge_phy_flags & BGEPHYF_5704_A0_BUG) 6259 printf(" - 5704 A0 bug\n"); 6260 if (sc->bge_phy_flags & BGEPHYF_JITTER_BUG) 6261 printf(" - jitter bug\n"); 6262 if (sc->bge_phy_flags & BGEPHYF_BER_BUG) 6263 printf(" - BER bug\n"); 6264 if (sc->bge_phy_flags & BGEPHYF_ADJUST_TRIM) 6265 printf(" - adjust trim\n"); 6266 if (sc->bge_phy_flags & BGEPHYF_NO_WIRESPEED) 6267 printf(" - no wirespeed\n"); 6268 6269 /* ASF related */ 6270 if (sc->bge_asf_mode & ASF_ENABLE) 6271 printf(" - ASF enable\n"); 6272 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) 6273 printf(" - ASF new handshake\n"); 6274 if (sc->bge_asf_mode & ASF_STACKUP) 6275 printf(" - ASF stackup\n"); 6276 } 6277 #endif /* BGE_DEBUG */ 6278 6279 static int 6280 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 6281 { 6282 prop_dictionary_t dict; 6283 prop_data_t ea; 6284 6285 if ((sc->bge_flags & BGEF_NO_EEPROM) == 0) 6286 return 1; 6287 6288 dict = device_properties(sc->bge_dev); 6289 ea = prop_dictionary_get(dict, "mac-address"); 6290 if (ea != NULL) { 6291 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 6292 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 6293 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 6294 return 0; 6295 } 6296 6297 return 1; 6298 } 6299 6300 static int 6301 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 6302 { 6303 uint32_t mac_addr; 6304 6305 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); 6306 if ((mac_addr >> 16) == 0x484b) { 6307 ether_addr[0] = (uint8_t)(mac_addr >> 8); 6308 ether_addr[1] = (uint8_t)mac_addr; 6309 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); 6310 ether_addr[2] = (uint8_t)(mac_addr >> 24); 6311 ether_addr[3] = (uint8_t)(mac_addr >> 16); 6312 ether_addr[4] = (uint8_t)(mac_addr >> 8); 6313 ether_addr[5] = (uint8_t)mac_addr; 6314 return 0; 6315 } 6316 return 1; 6317 } 6318 6319 static int 6320 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 6321 { 6322 int mac_offset = BGE_EE_MAC_OFFSET; 6323 6324 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6325 mac_offset = BGE_EE_MAC_OFFSET_5906; 6326 6327 return (bge_read_nvram(sc, ether_addr, mac_offset + 2, 6328 ETHER_ADDR_LEN)); 6329 } 6330 6331 static int 6332 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 6333 { 6334 6335 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 6336 return 1; 6337 6338 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 6339 ETHER_ADDR_LEN)); 6340 } 6341 6342 static int 6343 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 6344 { 6345 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 6346 /* NOTE: Order is critical */ 6347 bge_get_eaddr_fw, 6348 bge_get_eaddr_mem, 6349 bge_get_eaddr_nvram, 6350 bge_get_eaddr_eeprom, 6351 NULL 6352 }; 6353 const bge_eaddr_fcn_t *func; 6354 6355 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 6356 if ((*func)(sc, eaddr) == 0) 6357 break; 6358 } 6359 return (*func == NULL ? ENXIO : 0); 6360 } 6361