1 /* $OpenBSD: if_bge.c,v 1.407 2024/09/04 07:54:52 mglocker Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $ 36 */ 37 38 /* 39 * Broadcom BCM57xx/BCM590x family ethernet driver for OpenBSD. 40 * 41 * Written by Bill Paul <wpaul@windriver.com> 42 * Senior Engineer, Wind River Systems 43 */ 44 45 /* 46 * The Broadcom BCM5700 is based on technology originally developed by 47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 48 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has 49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 51 * frames, highly configurable RX filtering, and 16 RX and TX queues 52 * (which, along with RX filter rules, can be used for QOS applications). 53 * Other features, such as TCP segmentation, may be available as part 54 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 55 * firmware images can be stored in hardware and need not be compiled 56 * into the driver. 57 * 58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 59 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 60 * 61 * The BCM5701 is a single-chip solution incorporating both the BCM5700 62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 63 * does not support external SSRAM. 64 * 65 * Broadcom also produces a variation of the BCM5700 under the "Altima" 66 * brand name, which is functionally similar but lacks PCI-X support. 67 * 68 * Without external SSRAM, you can only have at most 4 TX rings, 69 * and the use of the mini RX ring is disabled. 
This seems to imply 70 * that these features are simply not available on the BCM5701. As a 71 * result, this driver does not implement any support for the mini RX 72 * ring. 73 */ 74 75 #include "bpfilter.h" 76 #include "vlan.h" 77 #include "kstat.h" 78 79 #include <sys/param.h> 80 #include <sys/systm.h> 81 #include <sys/sockio.h> 82 #include <sys/mbuf.h> 83 #include <sys/malloc.h> 84 #include <sys/device.h> 85 #include <sys/timeout.h> 86 #include <sys/atomic.h> 87 #include <sys/kstat.h> 88 89 #include <net/if.h> 90 #include <net/if_media.h> 91 92 #include <netinet/in.h> 93 #include <netinet/if_ether.h> 94 95 #if NBPFILTER > 0 96 #include <net/bpf.h> 97 #endif 98 99 #if defined(__sparc64__) || defined(__HAVE_FDT) 100 #include <dev/ofw/openfirm.h> 101 #endif 102 103 #include <dev/pci/pcireg.h> 104 #include <dev/pci/pcivar.h> 105 #include <dev/pci/pcidevs.h> 106 107 #include <dev/mii/mii.h> 108 #include <dev/mii/miivar.h> 109 #include <dev/mii/brgphyreg.h> 110 111 #include <dev/pci/if_bgereg.h> 112 113 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 114 115 const struct bge_revision * bge_lookup_rev(u_int32_t); 116 int bge_can_use_msi(struct bge_softc *); 117 int bge_probe(struct device *, void *, void *); 118 void bge_attach(struct device *, struct device *, void *); 119 int bge_detach(struct device *, int); 120 int bge_activate(struct device *, int); 121 122 const struct cfattach bge_ca = { 123 sizeof(struct bge_softc), bge_probe, bge_attach, bge_detach, 124 bge_activate 125 }; 126 127 struct cfdriver bge_cd = { 128 NULL, "bge", DV_IFNET 129 }; 130 131 void bge_txeof(struct bge_softc *); 132 void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); 133 void bge_rxeof(struct bge_softc *); 134 135 void bge_tick(void *); 136 void bge_stats_update(struct bge_softc *); 137 void bge_stats_update_regs(struct bge_softc *); 138 int bge_cksum_pad(struct mbuf *); 139 int bge_encap(struct bge_softc *, struct mbuf *, int *); 140 int bge_compact_dma_runt(struct mbuf *); 141 142 int bge_intr(void *); 143 void bge_start(struct ifqueue *); 144 int bge_ioctl(struct ifnet *, u_long, caddr_t); 145 int bge_rxrinfo(struct bge_softc *, struct if_rxrinfo *); 146 void bge_init(void *); 147 void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t); 148 void bge_stop(struct bge_softc *, int); 149 void bge_watchdog(struct ifnet *); 150 int bge_ifmedia_upd(struct ifnet *); 151 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 152 153 u_int8_t bge_nvram_getbyte(struct bge_softc *, int, u_int8_t *); 154 int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 155 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *); 156 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 157 158 void bge_iff(struct bge_softc *); 159 160 int bge_newbuf_jumbo(struct bge_softc *, int); 161 int bge_init_rx_ring_jumbo(struct bge_softc *); 162 void bge_fill_rx_ring_jumbo(struct bge_softc *); 163 void bge_free_rx_ring_jumbo(struct bge_softc *); 164 165 int bge_newbuf(struct bge_softc *, int); 166 int bge_init_rx_ring_std(struct bge_softc *); 167 void bge_rxtick(void *); 168 void bge_fill_rx_ring_std(struct bge_softc *); 169 void bge_free_rx_ring_std(struct bge_softc *); 170 171 void bge_free_tx_ring(struct bge_softc *); 172 int bge_init_tx_ring(struct bge_softc *); 173 174 void bge_chipinit(struct bge_softc *); 175 int bge_blockinit(struct bge_softc *); 176 u_int32_t bge_dma_swap_options(struct bge_softc *); 177 int bge_phy_addr(struct bge_softc *); 178 179 u_int32_t 
bge_readmem_ind(struct bge_softc *, int); 180 void bge_writemem_ind(struct bge_softc *, int, int); 181 void bge_writereg_ind(struct bge_softc *, int, int); 182 void bge_writembx(struct bge_softc *, int, int); 183 184 int bge_miibus_readreg(struct device *, int, int); 185 void bge_miibus_writereg(struct device *, int, int, int); 186 void bge_miibus_statchg(struct device *); 187 188 #define BGE_RESET_SHUTDOWN 0 189 #define BGE_RESET_START 1 190 #define BGE_RESET_SUSPEND 2 191 void bge_sig_post_reset(struct bge_softc *, int); 192 void bge_sig_legacy(struct bge_softc *, int); 193 void bge_sig_pre_reset(struct bge_softc *, int); 194 void bge_stop_fw(struct bge_softc *, int); 195 void bge_reset(struct bge_softc *); 196 void bge_link_upd(struct bge_softc *); 197 198 void bge_ape_lock_init(struct bge_softc *); 199 void bge_ape_read_fw_ver(struct bge_softc *); 200 int bge_ape_lock(struct bge_softc *, int); 201 void bge_ape_unlock(struct bge_softc *, int); 202 void bge_ape_send_event(struct bge_softc *, uint32_t); 203 void bge_ape_driver_state_change(struct bge_softc *, int); 204 205 #if NKSTAT > 0 206 void bge_kstat_attach(struct bge_softc *); 207 208 enum { 209 bge_stat_out_octets = 0, 210 bge_stat_collisions, 211 bge_stat_xon_sent, 212 bge_stat_xoff_sent, 213 bge_stat_xmit_errors, 214 bge_stat_coll_frames, 215 bge_stat_multicoll_frames, 216 bge_stat_deferred_xmit, 217 bge_stat_excess_coll, 218 bge_stat_late_coll, 219 bge_stat_out_ucast_pkt, 220 bge_stat_out_mcast_pkt, 221 bge_stat_out_bcast_pkt, 222 bge_stat_in_octets, 223 bge_stat_fragments, 224 bge_stat_in_ucast_pkt, 225 bge_stat_in_mcast_pkt, 226 bge_stat_in_bcast_pkt, 227 bge_stat_fcs_errors, 228 bge_stat_align_errors, 229 bge_stat_xon_rcvd, 230 bge_stat_xoff_rcvd, 231 bge_stat_ctrl_frame_rcvd, 232 bge_stat_xoff_entered, 233 bge_stat_too_long_frames, 234 bge_stat_jabbers, 235 bge_stat_too_short_pkts, 236 237 bge_stat_dma_rq_full, 238 bge_stat_dma_hprq_full, 239 bge_stat_sdc_queue_full, 240 bge_stat_nic_sendprod_set, 241 bge_stat_status_updated, 242 bge_stat_irqs, 243 bge_stat_avoided_irqs, 244 bge_stat_tx_thresh_hit, 245 246 bge_stat_filtdrop, 247 bge_stat_dma_wrq_full, 248 bge_stat_dma_hpwrq_full, 249 bge_stat_out_of_bds, 250 bge_stat_if_in_drops, 251 bge_stat_if_in_errors, 252 bge_stat_rx_thresh_hit, 253 }; 254 255 #endif 256 257 #ifdef BGE_DEBUG 258 #define DPRINTF(x) do { if (bgedebug) printf x; } while (0) 259 #define DPRINTFN(n,x) do { if (bgedebug >= (n)) printf x; } while (0) 260 int bgedebug = 0; 261 #else 262 #define DPRINTF(x) 263 #define DPRINTFN(n,x) 264 #endif 265 266 /* 267 * Various supported device vendors/types and their names. Note: the 268 * spec seems to indicate that the hardware still has Alteon's vendor 269 * ID burned into it, though it will always be overridden by the vendor 270 * ID in the EEPROM. Just to be safe, we cover all possibilities. 
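 * The table is keyed on (vendor, product) pairs; bge_probe() simply
 * matches the attach arguments against it, so a board appearing under
 * any of these IDs attaches as bge(4).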
271 */ 272 const struct pci_matchid bge_devices[] = { 273 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 }, 274 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 }, 275 276 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 }, 277 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 }, 278 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1003 }, 279 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 }, 280 281 { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 }, 282 283 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 }, 284 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 }, 285 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 }, 286 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT }, 287 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X }, 288 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 }, 289 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT }, 290 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X }, 291 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C }, 292 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S }, 293 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT }, 294 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 }, 295 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F }, 296 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K }, 297 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M }, 298 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT }, 299 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 }, 300 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S }, 301 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 }, 302 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S }, 303 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717 }, 304 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717C }, 305 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718 }, 306 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5719 }, 307 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 }, 308 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 }, 309 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 }, 310 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723 }, 311 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5725 }, 312 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5727 }, 313 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 }, 314 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F }, 315 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M }, 316 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 }, 317 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M }, 318 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 }, 319 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F }, 320 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M }, 321 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 }, 322 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M }, 323 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 }, 324 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M }, 325 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 }, 326 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761 }, 327 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E }, 328 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S }, 329 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE }, 330 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5762 }, 331 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764 }, 332 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 }, 333 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S }, 334 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 }, 
335 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 }, 336 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784 }, 337 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785F }, 338 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5785G }, 339 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 }, 340 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 }, 341 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F }, 342 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M }, 343 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 }, 344 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 }, 345 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 }, 346 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 }, 347 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M }, 348 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 }, 349 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M }, 350 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760 }, 351 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761 }, 352 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57762 }, 353 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57764 }, 354 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765 }, 355 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57766 }, 356 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57767 }, 357 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780 }, 358 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781 }, 359 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57782 }, 360 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785 }, 361 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57786 }, 362 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57787 }, 363 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788 }, 364 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790 }, 365 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791 }, 366 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795 }, 367 368 { PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE4 }, 369 { PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PW008GE5 }, 370 { PCI_VENDOR_FUJITSU, PCI_PRODUCT_FUJITSU_PP250_450_LAN }, 371 372 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 }, 373 374 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 } 375 }; 376 377 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_JUMBO_CAPABLE) 378 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_5700_FAMILY) 379 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_5705_PLUS) 380 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_5714_FAMILY) 381 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_575X_PLUS) 382 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_5755_PLUS) 383 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_5717_PLUS) 384 #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_57765_PLUS) 385 386 static const struct bge_revision { 387 u_int32_t br_chipid; 388 const char *br_name; 389 } bge_revisions[] = { 390 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 391 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 392 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 393 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 394 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 395 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 396 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 397 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 398 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 399 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 400 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 401 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 402 /* the 5702 and 5703 share the same ASIC ID */ 403 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, 
404 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, 405 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, 406 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, 407 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, 408 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 409 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 410 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 411 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 412 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 413 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 414 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 415 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 416 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 417 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 418 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 419 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 420 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 421 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 422 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 423 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 424 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 425 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 426 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 427 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 428 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 429 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 430 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 431 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 432 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 433 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 434 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, 435 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, 436 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, 437 { BGE_CHIPID_BCM5719_A1, "BCM5719 A1" }, 438 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, 439 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 440 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 441 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 442 { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, 443 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 444 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 445 { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, 446 { BGE_CHIPID_BCM5762_B0, "BCM5762 B0" }, 447 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 448 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 449 /* the 5754 and 5787 share the same ASIC ID */ 450 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 451 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 452 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 453 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 454 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 455 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, 456 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, 457 { BGE_CHIPID_BCM57766_A0, "BCM57766 A0" }, 458 { BGE_CHIPID_BCM57766_A1, "BCM57766 A1" }, 459 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 460 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 461 462 { 0, NULL } 463 }; 464 465 /* 466 * Some defaults for major revisions, so that newer steppings 467 * that we don't know about have a shot at working. 
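 * The revision lookup tries the exact steppings in bge_revisions
 * first and only falls back to matching BGE_ASICREV(chipid) against
 * this table, so a known stepping always wins over these defaults.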
468 */ 469 static const struct bge_revision bge_majorrevs[] = { 470 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 471 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 472 /* 5702 and 5703 share the same ASIC ID */ 473 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 474 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 475 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 476 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 477 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 478 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 479 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 480 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 481 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 482 { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 483 { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 484 { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 485 /* 5754 and 5787 share the same ASIC ID */ 486 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 487 { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 488 { BGE_ASICREV_BCM57765, "unknown BCM57765" }, 489 { BGE_ASICREV_BCM57766, "unknown BCM57766" }, 490 { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 491 { BGE_ASICREV_BCM5717, "unknown BCM5717" }, 492 { BGE_ASICREV_BCM5719, "unknown BCM5719" }, 493 { BGE_ASICREV_BCM5720, "unknown BCM5720" }, 494 { BGE_ASICREV_BCM5762, "unknown BCM5762" }, 495 496 { 0, NULL } 497 }; 498 499 u_int32_t 500 bge_readmem_ind(struct bge_softc *sc, int off) 501 { 502 struct pci_attach_args *pa = &(sc->bge_pa); 503 u_int32_t val; 504 505 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 506 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 507 return (0); 508 509 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 510 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA); 511 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0); 512 return (val); 513 } 514 515 void 516 bge_writemem_ind(struct bge_softc *sc, int off, int val) 517 { 518 struct pci_attach_args *pa = &(sc->bge_pa); 519 520 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 521 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) 522 return; 523 524 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off); 525 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val); 526 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, 0); 527 } 528 529 void 530 bge_writereg_ind(struct bge_softc *sc, int off, int val) 531 { 532 struct pci_attach_args *pa = &(sc->bge_pa); 533 534 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off); 535 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val); 536 } 537 538 void 539 bge_writembx(struct bge_softc *sc, int off, int val) 540 { 541 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 542 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 543 544 CSR_WRITE_4(sc, off, val); 545 } 546 547 /* 548 * Clear all stale locks and select the lock for this driver instance. 549 */ 550 void 551 bge_ape_lock_init(struct bge_softc *sc) 552 { 553 struct pci_attach_args *pa = &(sc->bge_pa); 554 uint32_t bit, regbase; 555 int i; 556 557 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 558 regbase = BGE_APE_LOCK_GRANT; 559 else 560 regbase = BGE_APE_PER_LOCK_GRANT; 561 562 /* Clear any stale locks. 
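 * Each lock has its own grant register, 4 bytes apart starting at
 * regbase (i.e. regbase + 4 * locknum).  The PHY locks are always
 * released with the DRIVER0 bit; the other locks use a per-PCI-
 * function bit, with function 0 sharing DRIVER0.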
*/ 563 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { 564 switch (i) { 565 case BGE_APE_LOCK_PHY0: 566 case BGE_APE_LOCK_PHY1: 567 case BGE_APE_LOCK_PHY2: 568 case BGE_APE_LOCK_PHY3: 569 bit = BGE_APE_LOCK_GRANT_DRIVER0; 570 break; 571 default: 572 if (pa->pa_function == 0) 573 bit = BGE_APE_LOCK_GRANT_DRIVER0; 574 else 575 bit = (1 << pa->pa_function); 576 } 577 APE_WRITE_4(sc, regbase + 4 * i, bit); 578 } 579 580 /* Select the PHY lock based on the device's function number. */ 581 switch (pa->pa_function) { 582 case 0: 583 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; 584 break; 585 case 1: 586 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; 587 break; 588 case 2: 589 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; 590 break; 591 case 3: 592 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; 593 break; 594 default: 595 printf("%s: PHY lock not supported on function %d\n", 596 sc->bge_dev.dv_xname, pa->pa_function); 597 break; 598 } 599 } 600 601 /* 602 * Check for APE firmware, set flags, and print version info. 603 */ 604 void 605 bge_ape_read_fw_ver(struct bge_softc *sc) 606 { 607 const char *fwtype; 608 uint32_t apedata, features; 609 610 /* Check for a valid APE signature in shared memory. */ 611 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); 612 if (apedata != BGE_APE_SEG_SIG_MAGIC) { 613 sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; 614 return; 615 } 616 617 /* Check if APE firmware is running. */ 618 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); 619 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { 620 printf("%s: APE signature found but FW status not ready! " 621 "0x%08x\n", sc->bge_dev.dv_xname, apedata); 622 return; 623 } 624 625 sc->bge_mfw_flags |= BGE_MFW_ON_APE; 626 627 /* Fetch the APE firmware type and version. */ 628 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); 629 features = APE_READ_4(sc, BGE_APE_FW_FEATURES); 630 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { 631 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; 632 fwtype = "NCSI"; 633 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { 634 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; 635 fwtype = "DASH"; 636 } else 637 fwtype = "UNKN"; 638 639 /* Print the APE firmware version. */ 640 printf(", APE firmware %s %d.%d.%d.%d", fwtype, 641 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, 642 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, 643 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, 644 (apedata & BGE_APE_FW_VERSION_BLDMSK)); 645 } 646 647 int 648 bge_ape_lock(struct bge_softc *sc, int locknum) 649 { 650 struct pci_attach_args *pa = &(sc->bge_pa); 651 uint32_t bit, gnt, req, status; 652 int i, off; 653 654 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 655 return (0); 656 657 /* Lock request/grant registers have different bases. */ 658 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) { 659 req = BGE_APE_LOCK_REQ; 660 gnt = BGE_APE_LOCK_GRANT; 661 } else { 662 req = BGE_APE_PER_LOCK_REQ; 663 gnt = BGE_APE_PER_LOCK_GRANT; 664 } 665 666 off = 4 * locknum; 667 668 switch (locknum) { 669 case BGE_APE_LOCK_GPIO: 670 /* Lock required when using GPIO. */ 671 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 672 return (0); 673 if (pa->pa_function == 0) 674 bit = BGE_APE_LOCK_REQ_DRIVER0; 675 else 676 bit = (1 << pa->pa_function); 677 break; 678 case BGE_APE_LOCK_GRC: 679 /* Lock required to reset the device. 
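 * The GRC lock serializes chip resets against concurrent register
 * access by the APE firmware; as with the GPIO and memory locks,
 * function 0 requests it with the shared DRIVER0 bit and the other
 * functions with their own (1 << function) bit.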
*/ 680 if (pa->pa_function == 0) 681 bit = BGE_APE_LOCK_REQ_DRIVER0; 682 else 683 bit = (1 << pa->pa_function); 684 break; 685 case BGE_APE_LOCK_MEM: 686 /* Lock required when accessing certain APE memory. */ 687 if (pa->pa_function == 0) 688 bit = BGE_APE_LOCK_REQ_DRIVER0; 689 else 690 bit = (1 << pa->pa_function); 691 break; 692 case BGE_APE_LOCK_PHY0: 693 case BGE_APE_LOCK_PHY1: 694 case BGE_APE_LOCK_PHY2: 695 case BGE_APE_LOCK_PHY3: 696 /* Lock required when accessing PHYs. */ 697 bit = BGE_APE_LOCK_REQ_DRIVER0; 698 break; 699 default: 700 return (EINVAL); 701 } 702 703 /* Request a lock. */ 704 APE_WRITE_4(sc, req + off, bit); 705 706 /* Wait up to 1 second to acquire lock. */ 707 for (i = 0; i < 20000; i++) { 708 status = APE_READ_4(sc, gnt + off); 709 if (status == bit) 710 break; 711 DELAY(50); 712 } 713 714 /* Handle any errors. */ 715 if (status != bit) { 716 printf("%s: APE lock %d request failed! " 717 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", 718 sc->bge_dev.dv_xname, 719 locknum, req + off, bit & 0xFFFF, gnt + off, 720 status & 0xFFFF); 721 /* Revoke the lock request. */ 722 APE_WRITE_4(sc, gnt + off, bit); 723 return (EBUSY); 724 } 725 726 return (0); 727 } 728 729 void 730 bge_ape_unlock(struct bge_softc *sc, int locknum) 731 { 732 struct pci_attach_args *pa = &(sc->bge_pa); 733 uint32_t bit, gnt; 734 int off; 735 736 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 737 return; 738 739 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 740 gnt = BGE_APE_LOCK_GRANT; 741 else 742 gnt = BGE_APE_PER_LOCK_GRANT; 743 744 off = 4 * locknum; 745 746 switch (locknum) { 747 case BGE_APE_LOCK_GPIO: 748 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 749 return; 750 if (pa->pa_function == 0) 751 bit = BGE_APE_LOCK_GRANT_DRIVER0; 752 else 753 bit = (1 << pa->pa_function); 754 break; 755 case BGE_APE_LOCK_GRC: 756 if (pa->pa_function == 0) 757 bit = BGE_APE_LOCK_GRANT_DRIVER0; 758 else 759 bit = (1 << pa->pa_function); 760 break; 761 case BGE_APE_LOCK_MEM: 762 if (pa->pa_function == 0) 763 bit = BGE_APE_LOCK_GRANT_DRIVER0; 764 else 765 bit = (1 << pa->pa_function); 766 break; 767 case BGE_APE_LOCK_PHY0: 768 case BGE_APE_LOCK_PHY1: 769 case BGE_APE_LOCK_PHY2: 770 case BGE_APE_LOCK_PHY3: 771 bit = BGE_APE_LOCK_GRANT_DRIVER0; 772 break; 773 default: 774 return; 775 } 776 777 APE_WRITE_4(sc, gnt + off, bit); 778 } 779 780 /* 781 * Send an event to the APE firmware. 782 */ 783 void 784 bge_ape_send_event(struct bge_softc *sc, uint32_t event) 785 { 786 uint32_t apedata; 787 int i; 788 789 /* NCSI does not support APE events. */ 790 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 791 return; 792 793 /* Wait up to 1ms for APE to service previous event. 
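 * Ten polls, 100us apart: take the APE memory lock, check that no
 * previous event is still pending, post the new event with the
 * EVENT_PENDING bit set, then ring the APE_EVENT_1 doorbell so the
 * firmware picks it up.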
*/ 794 for (i = 10; i > 0; i--) { 795 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) 796 break; 797 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); 798 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { 799 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | 800 BGE_APE_EVENT_STATUS_EVENT_PENDING); 801 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 802 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); 803 break; 804 } 805 bge_ape_unlock(sc, BGE_APE_LOCK_MEM); 806 DELAY(100); 807 } 808 if (i == 0) { 809 printf("%s: APE event 0x%08x send timed out\n", 810 sc->bge_dev.dv_xname, event); 811 } 812 } 813 814 void 815 bge_ape_driver_state_change(struct bge_softc *sc, int kind) 816 { 817 uint32_t apedata, event; 818 819 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) 820 return; 821 822 switch (kind) { 823 case BGE_RESET_START: 824 /* If this is the first load, clear the load counter. */ 825 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); 826 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) 827 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); 828 else { 829 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); 830 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); 831 } 832 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, 833 BGE_APE_HOST_SEG_SIG_MAGIC); 834 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, 835 BGE_APE_HOST_SEG_LEN_MAGIC); 836 837 /* Add some version info if bge(4) supports it. */ 838 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, 839 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); 840 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, 841 BGE_APE_HOST_BEHAV_NO_PHYLOCK); 842 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, 843 BGE_APE_HOST_HEARTBEAT_INT_DISABLE); 844 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 845 BGE_APE_HOST_DRVR_STATE_START); 846 event = BGE_APE_EVENT_STATUS_STATE_START; 847 break; 848 case BGE_RESET_SHUTDOWN: 849 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, 850 BGE_APE_HOST_DRVR_STATE_UNLOAD); 851 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; 852 break; 853 case BGE_RESET_SUSPEND: 854 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; 855 break; 856 default: 857 return; 858 } 859 860 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | 861 BGE_APE_EVENT_STATUS_STATE_CHNGE); 862 } 863 864 865 u_int8_t 866 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 867 { 868 u_int32_t access, byte = 0; 869 int i; 870 871 /* Lock. */ 872 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 873 for (i = 0; i < 8000; i++) { 874 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 875 break; 876 DELAY(20); 877 } 878 if (i == 8000) 879 return (1); 880 881 /* Enable access. */ 882 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 883 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 884 885 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 886 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 887 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 888 DELAY(10); 889 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 890 DELAY(10); 891 break; 892 } 893 } 894 895 if (i == BGE_TIMEOUT * 10) { 896 printf("%s: nvram read timed out\n", sc->bge_dev.dv_xname); 897 return (1); 898 } 899 900 /* Get result. */ 901 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 902 903 *dest = (swap32(byte) >> ((addr % 4) * 8)) & 0xFF; 904 905 /* Disable access. */ 906 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 907 908 /* Unlock. */ 909 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 910 CSR_READ_4(sc, BGE_NVRAM_SWARB); 911 912 return (0); 913 } 914 915 /* 916 * Read a sequence of bytes from NVRAM. 
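 * Only the BCM5906 stores its configuration here; every other chip
 * fails this call and uses the EEPROM path below.  A hypothetical
 * caller pulling a buffer out of NVRAM would look like:
 *
 *	u_int8_t buf[ETHER_ADDR_LEN];
 *
 *	if (bge_read_nvram(sc, (caddr_t)buf, off, sizeof(buf)) != 0)
 *		... handle the read failure ...
 *
 * where "off" is a byte offset; bge_nvram_getbyte() takes care of
 * the word-aligned accesses underneath.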
917 */ 918 919 int 920 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) 921 { 922 int err = 0, i; 923 u_int8_t byte = 0; 924 925 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 926 return (1); 927 928 for (i = 0; i < cnt; i++) { 929 err = bge_nvram_getbyte(sc, off + i, &byte); 930 if (err) 931 break; 932 *(dest + i) = byte; 933 } 934 935 return (err ? 1 : 0); 936 } 937 938 /* 939 * Read a byte of data stored in the EEPROM at address 'addr.' The 940 * BCM570x supports both the traditional bitbang interface and an 941 * auto access interface for reading the EEPROM. We use the auto 942 * access method. 943 */ 944 u_int8_t 945 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest) 946 { 947 int i; 948 u_int32_t byte = 0; 949 950 /* 951 * Enable use of auto EEPROM access so we can avoid 952 * having to use the bitbang method. 953 */ 954 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 955 956 /* Reset the EEPROM, load the clock period. */ 957 CSR_WRITE_4(sc, BGE_EE_ADDR, 958 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 959 DELAY(20); 960 961 /* Issue the read EEPROM command. */ 962 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 963 964 /* Wait for completion */ 965 for(i = 0; i < BGE_TIMEOUT * 10; i++) { 966 DELAY(10); 967 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 968 break; 969 } 970 971 if (i == BGE_TIMEOUT * 10) { 972 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname); 973 return (1); 974 } 975 976 /* Get result. */ 977 byte = CSR_READ_4(sc, BGE_EE_DATA); 978 979 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 980 981 return (0); 982 } 983 984 /* 985 * Read a sequence of bytes from the EEPROM. 986 */ 987 int 988 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) 989 { 990 int i, error = 0; 991 u_int8_t byte = 0; 992 993 for (i = 0; i < cnt; i++) { 994 error = bge_eeprom_getbyte(sc, off + i, &byte); 995 if (error) 996 break; 997 *(dest + i) = byte; 998 } 999 1000 return (error ? 
1 : 0);
}

int
bge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val, autopoll;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));
	CSR_READ_4(sc, BGE_MI_COMM); /* force write */

	for (i = 0; i < 200; i++) {
		delay(1);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == 200) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

void
bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t autopoll;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == MII_100T2CR || reg == BRGPHY_MII_AUXCTL))
		return;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return;

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		DELAY(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
	CSR_READ_4(sc, BGE_MI_COMM); /* force write */

	for (i = 0; i < 200; i++) {
		delay(1);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == 200) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(struct device *dev)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t mac_mode, rx_mode, tx_mode;

	/*
	 * Get flow control negotiation result.
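	 * With autonegotiated media, the pause bits the PHY settled on
	 * (IFM_ETH_TXPAUSE/IFM_ETH_RXPAUSE in mii_media_active) replace
	 * the cached bge_flowflags; those flags gate the MAC's TX/RX
	 * flow control enables below, and only in full duplex.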
1112 */ 1113 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1114 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) 1115 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1116 1117 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 1118 mii->mii_media_status & IFM_ACTIVE && 1119 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1120 BGE_STS_SETBIT(sc, BGE_STS_LINK); 1121 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 1122 (!(mii->mii_media_status & IFM_ACTIVE) || 1123 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 1124 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 1125 1126 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 1127 return; 1128 1129 /* Set the port mode (MII/GMII) to match the link speed. */ 1130 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & 1131 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); 1132 tx_mode = CSR_READ_4(sc, BGE_TX_MODE); 1133 rx_mode = CSR_READ_4(sc, BGE_RX_MODE); 1134 1135 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1136 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 1137 mac_mode |= BGE_PORTMODE_GMII; 1138 else 1139 mac_mode |= BGE_PORTMODE_MII; 1140 1141 /* Set MAC flow control behavior to match link flow control settings. */ 1142 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; 1143 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; 1144 if (mii->mii_media_active & IFM_FDX) { 1145 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) 1146 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; 1147 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) 1148 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; 1149 } else 1150 mac_mode |= BGE_MACMODE_HALF_DUPLEX; 1151 1152 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); 1153 DELAY(40); 1154 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); 1155 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); 1156 } 1157 1158 /* 1159 * Initialize a standard receive ring descriptor. 1160 */ 1161 int 1162 bge_newbuf(struct bge_softc *sc, int i) 1163 { 1164 bus_dmamap_t dmap = sc->bge_cdata.bge_rx_std_map[i]; 1165 struct bge_rx_bd *r = &sc->bge_rdata->bge_rx_std_ring[i]; 1166 struct mbuf *m; 1167 int error; 1168 1169 m = MCLGETL(NULL, M_DONTWAIT, sc->bge_rx_std_len); 1170 if (!m) 1171 return (ENOBUFS); 1172 m->m_len = m->m_pkthdr.len = sc->bge_rx_std_len; 1173 if (!(sc->bge_flags & BGE_RX_ALIGNBUG)) 1174 m_adj(m, ETHER_ALIGN); 1175 1176 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m, 1177 BUS_DMA_READ|BUS_DMA_NOWAIT); 1178 if (error) { 1179 m_freem(m); 1180 return (ENOBUFS); 1181 } 1182 1183 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize, 1184 BUS_DMASYNC_PREREAD); 1185 sc->bge_cdata.bge_rx_std_chain[i] = m; 1186 1187 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1188 offsetof(struct bge_ring_data, bge_rx_std_ring) + 1189 i * sizeof (struct bge_rx_bd), 1190 sizeof (struct bge_rx_bd), 1191 BUS_DMASYNC_POSTWRITE); 1192 1193 BGE_HOSTADDR(r->bge_addr, dmap->dm_segs[0].ds_addr); 1194 r->bge_flags = BGE_RXBDFLAG_END; 1195 r->bge_len = m->m_len; 1196 r->bge_idx = i; 1197 1198 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1199 offsetof(struct bge_ring_data, bge_rx_std_ring) + 1200 i * sizeof (struct bge_rx_bd), 1201 sizeof (struct bge_rx_bd), 1202 BUS_DMASYNC_PREWRITE); 1203 1204 return (0); 1205 } 1206 1207 /* 1208 * Initialize a Jumbo receive ring descriptor. 
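 * Jumbo buffers may be loaded as up to four DMA segments, so the
 * extended RX BD carries four address/length slots and still
 * occupies a single ring entry:
 *
 *	bge_bd.bge_addr/bge_len   segment 0
 *	bge_addr1/bge_len1        segment 1 (length 0 if unused)
 *	bge_addr2/bge_len2        segment 2 (length 0 if unused)
 *	bge_addr3/bge_len3        segment 3 (length 0 if unused)
 *
 * The fallthrough switch below fills the slots from dm_segs[].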
1209 */ 1210 int 1211 bge_newbuf_jumbo(struct bge_softc *sc, int i) 1212 { 1213 bus_dmamap_t dmap = sc->bge_cdata.bge_rx_jumbo_map[i]; 1214 struct bge_ext_rx_bd *r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 1215 struct mbuf *m; 1216 int error; 1217 1218 m = MCLGETL(NULL, M_DONTWAIT, BGE_JLEN); 1219 if (!m) 1220 return (ENOBUFS); 1221 m->m_len = m->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 1222 if (!(sc->bge_flags & BGE_RX_ALIGNBUG)) 1223 m_adj(m, ETHER_ALIGN); 1224 1225 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmap, m, 1226 BUS_DMA_READ|BUS_DMA_NOWAIT); 1227 if (error) { 1228 m_freem(m); 1229 return (ENOBUFS); 1230 } 1231 1232 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, dmap->dm_mapsize, 1233 BUS_DMASYNC_PREREAD); 1234 sc->bge_cdata.bge_rx_jumbo_chain[i] = m; 1235 1236 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1237 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1238 i * sizeof (struct bge_ext_rx_bd), 1239 sizeof (struct bge_ext_rx_bd), 1240 BUS_DMASYNC_POSTWRITE); 1241 1242 /* 1243 * Fill in the extended RX buffer descriptor. 1244 */ 1245 r->bge_bd.bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; 1246 r->bge_bd.bge_idx = i; 1247 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 1248 switch (dmap->dm_nsegs) { 1249 case 4: 1250 BGE_HOSTADDR(r->bge_addr3, dmap->dm_segs[3].ds_addr); 1251 r->bge_len3 = dmap->dm_segs[3].ds_len; 1252 /* FALLTHROUGH */ 1253 case 3: 1254 BGE_HOSTADDR(r->bge_addr2, dmap->dm_segs[2].ds_addr); 1255 r->bge_len2 = dmap->dm_segs[2].ds_len; 1256 /* FALLTHROUGH */ 1257 case 2: 1258 BGE_HOSTADDR(r->bge_addr1, dmap->dm_segs[1].ds_addr); 1259 r->bge_len1 = dmap->dm_segs[1].ds_len; 1260 /* FALLTHROUGH */ 1261 case 1: 1262 BGE_HOSTADDR(r->bge_bd.bge_addr, dmap->dm_segs[0].ds_addr); 1263 r->bge_bd.bge_len = dmap->dm_segs[0].ds_len; 1264 break; 1265 default: 1266 panic("%s: %d segments", __func__, dmap->dm_nsegs); 1267 } 1268 1269 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1270 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1271 i * sizeof (struct bge_ext_rx_bd), 1272 sizeof (struct bge_ext_rx_bd), 1273 BUS_DMASYNC_PREWRITE); 1274 1275 return (0); 1276 } 1277 1278 int 1279 bge_init_rx_ring_std(struct bge_softc *sc) 1280 { 1281 int i; 1282 1283 if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) 1284 return (0); 1285 1286 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1287 if (bus_dmamap_create(sc->bge_dmatag, sc->bge_rx_std_len, 1, 1288 sc->bge_rx_std_len, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1289 &sc->bge_cdata.bge_rx_std_map[i]) != 0) { 1290 printf("%s: unable to create dmamap for slot %d\n", 1291 sc->bge_dev.dv_xname, i); 1292 goto uncreate; 1293 } 1294 bzero(&sc->bge_rdata->bge_rx_std_ring[i], 1295 sizeof(struct bge_rx_bd)); 1296 } 1297 1298 sc->bge_std = BGE_STD_RX_RING_CNT - 1; 1299 1300 /* lwm must be greater than the replenish threshold */ 1301 if_rxr_init(&sc->bge_std_ring, 17, BGE_STD_RX_RING_CNT); 1302 bge_fill_rx_ring_std(sc); 1303 1304 SET(sc->bge_flags, BGE_RXRING_VALID); 1305 1306 return (0); 1307 1308 uncreate: 1309 while (--i) { 1310 bus_dmamap_destroy(sc->bge_dmatag, 1311 sc->bge_cdata.bge_rx_std_map[i]); 1312 } 1313 return (1); 1314 } 1315 1316 /* 1317 * When the refill timeout for a ring is active, that ring is so empty 1318 * that no more packets can be received on it, so the interrupt handler 1319 * will not attempt to refill it, meaning we don't need to protect against 1320 * interrupts here. 
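 * The timeout just retries the fill once a tick; when more than 8
 * buffers are back on the ring, the fill routine stops re-arming it
 * and the interrupt handler takes over replenishment again.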
1321 */ 1322 1323 void 1324 bge_rxtick(void *arg) 1325 { 1326 struct bge_softc *sc = arg; 1327 1328 if (ISSET(sc->bge_flags, BGE_RXRING_VALID) && 1329 if_rxr_inuse(&sc->bge_std_ring) <= 8) 1330 bge_fill_rx_ring_std(sc); 1331 } 1332 1333 void 1334 bge_rxtick_jumbo(void *arg) 1335 { 1336 struct bge_softc *sc = arg; 1337 1338 if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID) && 1339 if_rxr_inuse(&sc->bge_jumbo_ring) <= 8) 1340 bge_fill_rx_ring_jumbo(sc); 1341 } 1342 1343 void 1344 bge_fill_rx_ring_std(struct bge_softc *sc) 1345 { 1346 int i; 1347 int post = 0; 1348 u_int slots; 1349 1350 i = sc->bge_std; 1351 for (slots = if_rxr_get(&sc->bge_std_ring, BGE_STD_RX_RING_CNT); 1352 slots > 0; slots--) { 1353 BGE_INC(i, BGE_STD_RX_RING_CNT); 1354 1355 if (bge_newbuf(sc, i) != 0) 1356 break; 1357 1358 sc->bge_std = i; 1359 post = 1; 1360 } 1361 if_rxr_put(&sc->bge_std_ring, slots); 1362 1363 if (post) 1364 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1365 1366 /* 1367 * bge always needs more than 8 packets on the ring. if we can't do 1368 * that now, then try again later. 1369 */ 1370 if (if_rxr_inuse(&sc->bge_std_ring) <= 8) 1371 timeout_add(&sc->bge_rxtimeout, 1); 1372 } 1373 1374 void 1375 bge_free_rx_ring_std(struct bge_softc *sc) 1376 { 1377 bus_dmamap_t dmap; 1378 struct mbuf *m; 1379 int i; 1380 1381 if (!ISSET(sc->bge_flags, BGE_RXRING_VALID)) 1382 return; 1383 1384 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1385 dmap = sc->bge_cdata.bge_rx_std_map[i]; 1386 m = sc->bge_cdata.bge_rx_std_chain[i]; 1387 if (m != NULL) { 1388 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, 1389 dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1390 bus_dmamap_unload(sc->bge_dmatag, dmap); 1391 m_freem(m); 1392 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1393 } 1394 bus_dmamap_destroy(sc->bge_dmatag, dmap); 1395 sc->bge_cdata.bge_rx_std_map[i] = NULL; 1396 bzero(&sc->bge_rdata->bge_rx_std_ring[i], 1397 sizeof(struct bge_rx_bd)); 1398 } 1399 1400 CLR(sc->bge_flags, BGE_RXRING_VALID); 1401 } 1402 1403 int 1404 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1405 { 1406 volatile struct bge_rcb *rcb; 1407 int i; 1408 1409 if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) 1410 return (0); 1411 1412 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1413 if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN, 4, BGE_JLEN, 0, 1414 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1415 &sc->bge_cdata.bge_rx_jumbo_map[i]) != 0) { 1416 printf("%s: unable to create dmamap for slot %d\n", 1417 sc->bge_dev.dv_xname, i); 1418 goto uncreate; 1419 } 1420 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i], 1421 sizeof(struct bge_ext_rx_bd)); 1422 } 1423 1424 sc->bge_jumbo = BGE_JUMBO_RX_RING_CNT - 1; 1425 1426 /* lwm must be greater than the replenish threshold */ 1427 if_rxr_init(&sc->bge_jumbo_ring, 17, BGE_JUMBO_RX_RING_CNT); 1428 bge_fill_rx_ring_jumbo(sc); 1429 1430 SET(sc->bge_flags, BGE_JUMBO_RXRING_VALID); 1431 1432 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1433 rcb->bge_maxlen_flags = 1434 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); 1435 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1436 1437 return (0); 1438 1439 uncreate: 1440 while (--i) { 1441 bus_dmamap_destroy(sc->bge_dmatag, 1442 sc->bge_cdata.bge_rx_jumbo_map[i]); 1443 } 1444 return (1); 1445 } 1446 1447 void 1448 bge_fill_rx_ring_jumbo(struct bge_softc *sc) 1449 { 1450 int i; 1451 int post = 0; 1452 u_int slots; 1453 1454 i = sc->bge_jumbo; 1455 for (slots = if_rxr_get(&sc->bge_jumbo_ring, BGE_JUMBO_RX_RING_CNT); 1456 slots > 0; slots--) { 1457 BGE_INC(i, 
BGE_JUMBO_RX_RING_CNT); 1458 1459 if (bge_newbuf_jumbo(sc, i) != 0) 1460 break; 1461 1462 sc->bge_jumbo = i; 1463 post = 1; 1464 } 1465 if_rxr_put(&sc->bge_jumbo_ring, slots); 1466 1467 if (post) 1468 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1469 1470 /* 1471 * bge always needs more than 8 packets on the ring. if we can't do 1472 * that now, then try again later. 1473 */ 1474 if (if_rxr_inuse(&sc->bge_jumbo_ring) <= 8) 1475 timeout_add(&sc->bge_rxtimeout_jumbo, 1); 1476 } 1477 1478 void 1479 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1480 { 1481 bus_dmamap_t dmap; 1482 struct mbuf *m; 1483 int i; 1484 1485 if (!ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) 1486 return; 1487 1488 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1489 dmap = sc->bge_cdata.bge_rx_jumbo_map[i]; 1490 m = sc->bge_cdata.bge_rx_jumbo_chain[i]; 1491 if (m != NULL) { 1492 bus_dmamap_sync(sc->bge_dmatag, dmap, 0, 1493 dmap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1494 bus_dmamap_unload(sc->bge_dmatag, dmap); 1495 m_freem(m); 1496 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1497 } 1498 bus_dmamap_destroy(sc->bge_dmatag, dmap); 1499 sc->bge_cdata.bge_rx_jumbo_map[i] = NULL; 1500 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i], 1501 sizeof(struct bge_ext_rx_bd)); 1502 } 1503 1504 CLR(sc->bge_flags, BGE_JUMBO_RXRING_VALID); 1505 } 1506 1507 void 1508 bge_free_tx_ring(struct bge_softc *sc) 1509 { 1510 int i; 1511 1512 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1513 return; 1514 1515 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1516 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1517 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1518 sc->bge_cdata.bge_tx_chain[i] = NULL; 1519 sc->bge_cdata.bge_tx_map[i] = NULL; 1520 } 1521 bzero(&sc->bge_rdata->bge_tx_ring[i], 1522 sizeof(struct bge_tx_bd)); 1523 1524 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_txdma[i]); 1525 } 1526 1527 sc->bge_flags &= ~BGE_TXRING_VALID; 1528 } 1529 1530 int 1531 bge_init_tx_ring(struct bge_softc *sc) 1532 { 1533 int i; 1534 bus_size_t txsegsz, txmaxsegsz; 1535 1536 if (sc->bge_flags & BGE_TXRING_VALID) 1537 return (0); 1538 1539 sc->bge_txcnt = 0; 1540 sc->bge_tx_saved_considx = 0; 1541 1542 /* Initialize transmit producer index for host-memory send ring. */ 1543 sc->bge_tx_prodidx = 0; 1544 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1545 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1546 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1547 1548 /* NIC-memory send ring not used; initialize to zero. */ 1549 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1550 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1551 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1552 1553 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1554 txsegsz = 4096; 1555 txmaxsegsz = BGE_JLEN; 1556 } else { 1557 txsegsz = MCLBYTES; 1558 txmaxsegsz = MCLBYTES; 1559 } 1560 1561 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1562 if (bus_dmamap_create(sc->bge_dmatag, txmaxsegsz, 1563 BGE_NTXSEG, txsegsz, 0, BUS_DMA_NOWAIT, &sc->bge_txdma[i])) 1564 return (ENOBUFS); 1565 } 1566 1567 sc->bge_flags |= BGE_TXRING_VALID; 1568 1569 return (0); 1570 } 1571 1572 void 1573 bge_iff(struct bge_softc *sc) 1574 { 1575 struct arpcom *ac = &sc->arpcom; 1576 struct ifnet *ifp = &ac->ac_if; 1577 struct ether_multi *enm; 1578 struct ether_multistep step; 1579 u_int8_t hashes[16]; 1580 u_int32_t h, rxmode; 1581 1582 /* First, zot all the existing filters. 
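 * Then rebuild the filter state: promiscuous mode sets RX_PROMISC,
 * a multicast range forces ALLMULTI with an all-ones hash table,
 * and otherwise each address is hashed with ether_crc32_le(), the
 * low 7 bits of the CRC selecting one of the 128 bits written back
 * to the BGE_MAR0 hash registers.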
*/ 1583 rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC; 1584 ifp->if_flags &= ~IFF_ALLMULTI; 1585 memset(hashes, 0x00, sizeof(hashes)); 1586 1587 if (ifp->if_flags & IFF_PROMISC) { 1588 ifp->if_flags |= IFF_ALLMULTI; 1589 rxmode |= BGE_RXMODE_RX_PROMISC; 1590 } else if (ac->ac_multirangecnt > 0) { 1591 ifp->if_flags |= IFF_ALLMULTI; 1592 memset(hashes, 0xff, sizeof(hashes)); 1593 } else { 1594 ETHER_FIRST_MULTI(step, ac, enm); 1595 while (enm != NULL) { 1596 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1597 1598 setbit(hashes, h & 0x7F); 1599 1600 ETHER_NEXT_MULTI(step, enm); 1601 } 1602 } 1603 1604 bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0, 1605 hashes, sizeof(hashes)); 1606 CSR_WRITE_4(sc, BGE_RX_MODE, rxmode); 1607 } 1608 1609 void 1610 bge_sig_pre_reset(struct bge_softc *sc, int type) 1611 { 1612 /* no bge_asf_mode. */ 1613 1614 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 1615 bge_ape_driver_state_change(sc, type); 1616 } 1617 1618 void 1619 bge_sig_post_reset(struct bge_softc *sc, int type) 1620 { 1621 /* no bge_asf_mode. */ 1622 1623 if (type == BGE_RESET_SHUTDOWN) 1624 bge_ape_driver_state_change(sc, type); 1625 } 1626 1627 void 1628 bge_sig_legacy(struct bge_softc *sc, int type) 1629 { 1630 /* no bge_asf_mode. */ 1631 } 1632 1633 void 1634 bge_stop_fw(struct bge_softc *sc, int type) 1635 { 1636 /* no bge_asf_mode. */ 1637 } 1638 1639 u_int32_t 1640 bge_dma_swap_options(struct bge_softc *sc) 1641 { 1642 u_int32_t dma_options = BGE_DMA_SWAP_OPTIONS; 1643 1644 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 1645 dma_options |= BGE_MODECTL_BYTESWAP_B2HRX_DATA | 1646 BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE | 1647 BGE_MODECTL_HTX2B_ENABLE; 1648 } 1649 1650 return (dma_options); 1651 } 1652 1653 int 1654 bge_phy_addr(struct bge_softc *sc) 1655 { 1656 struct pci_attach_args *pa = &(sc->bge_pa); 1657 int phy_addr = 1; 1658 1659 switch (BGE_ASICREV(sc->bge_chipid)) { 1660 case BGE_ASICREV_BCM5717: 1661 case BGE_ASICREV_BCM5719: 1662 case BGE_ASICREV_BCM5720: 1663 phy_addr = pa->pa_function; 1664 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { 1665 phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) & 1666 BGE_SGDIGSTS_IS_SERDES) ? 8 : 1; 1667 } else { 1668 phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & 1669 BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1; 1670 } 1671 } 1672 1673 return (phy_addr); 1674 } 1675 1676 /* 1677 * Do endian, PCI and DMA initialization. 1678 */ 1679 void 1680 bge_chipinit(struct bge_softc *sc) 1681 { 1682 struct pci_attach_args *pa = &(sc->bge_pa); 1683 u_int32_t dma_rw_ctl, misc_ctl, mode_ctl; 1684 int i; 1685 1686 /* Set endianness before we access any non-PCI registers. */ 1687 misc_ctl = BGE_INIT; 1688 if (sc->bge_flags & BGE_TAGGED_STATUS) 1689 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; 1690 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 1691 misc_ctl); 1692 1693 /* 1694 * Clear the MAC statistics block in the NIC's 1695 * internal memory. 
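 * Both this region and the status block are zeroed one 32-bit word
 * at a time through the PCI memory window (BGE_MEMWIN_WRITE) before
 * the rest of the chip is set up.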
1696 */ 1697 for (i = BGE_STATS_BLOCK; 1698 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1699 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1700 1701 for (i = BGE_STATUS_BLOCK; 1702 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) 1703 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0); 1704 1705 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 1706 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 1707 /* 1708 * For the 57766 and non Ax versions of 57765, bootcode 1709 * needs to setup the PCIE Fast Training Sequence (FTS) 1710 * value to prevent transmit hangs. 1711 */ 1712 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) { 1713 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, 1714 CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) | 1715 BGE_CPMU_PADRNG_CTL_RDIV2); 1716 } 1717 } 1718 1719 /* 1720 * Set up the PCI DMA control register. 1721 */ 1722 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | 1723 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); 1724 1725 if (sc->bge_flags & BGE_PCIE) { 1726 if (sc->bge_mps >= 256) 1727 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 1728 else 1729 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1730 } else if (sc->bge_flags & BGE_PCIX) { 1731 /* PCI-X bus */ 1732 if (BGE_IS_5714_FAMILY(sc)) { 1733 /* 256 bytes for read and write. */ 1734 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 1735 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 1736 1737 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 1738 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 1739 else 1740 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1741 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1742 /* 1536 bytes for read, 384 bytes for write. */ 1743 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1744 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1745 } else { 1746 /* 384 bytes for read and write. */ 1747 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 1748 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 1749 (0x0F); 1750 } 1751 1752 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 1753 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1754 u_int32_t tmp; 1755 1756 /* Set ONEDMA_ATONCE for hardware workaround. */ 1757 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1758 if (tmp == 6 || tmp == 7) 1759 dma_rw_ctl |= 1760 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 1761 1762 /* Set PCI-X DMA write workaround. */ 1763 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 1764 } 1765 } else { 1766 /* Conventional PCI bus: 256 bytes for read and write. */ 1767 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1768 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 1769 1770 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 && 1771 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750) 1772 dma_rw_ctl |= 0x0F; 1773 } 1774 1775 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 1776 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) 1777 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 1778 BGE_PCIDMARWCTL_ASRT_ALL_BE; 1779 1780 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 1781 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1782 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1783 1784 if (BGE_IS_5717_PLUS(sc)) { 1785 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; 1786 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) 1787 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; 1788 1789 /* 1790 * Enable HW workaround for controllers that misinterpret 1791 * a status tag update and leave interrupts permanently 1792 * disabled. 
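 * The workaround bit is set below for all 5717-plus parts except
 * the 57765-plus family, the BCM5717 itself and the BCM5762, which
 * are presumably unaffected.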
1793 	 */
1794 	if (!BGE_IS_57765_PLUS(sc) &&
1795 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
1796 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5762)
1797 		dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1798 	}
1799 
1800 	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1801 
1802 	/*
1803 	 * Set up general mode register.
1804 	 */
1805 	mode_ctl = bge_dma_swap_options(sc);
1806 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
1807 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
1808 		/* Retain Host-2-BMC settings written by APE firmware. */
1809 		mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1810 		    (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1811 		    BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1812 		    BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1813 	}
1814 	mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1815 	    BGE_MODECTL_TX_NO_PHDR_CSUM;
1816 
1817 	/*
1818 	 * The BCM5701 B5 has a bug that causes data corruption when
1819 	 * using 64-bit DMA reads, which can be terminated early and
1820 	 * then completed later as 32-bit accesses, in combination
1821 	 * with certain bridges.
1822 	 */
1823 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1824 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1825 		mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1826 
1827 	CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1828 
1829 	/*
1830 	 * Disable memory write invalidate. Apparently it is not supported
1831 	 * properly by these devices.
1832 	 */
1833 	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1834 	    PCI_COMMAND_INVALIDATE_ENABLE);
1835 
1836 #ifdef __brokenalpha__
1837 	/*
1838 	 * Must ensure that we do not cross an 8K (byte) boundary
1839 	 * for DMA reads. Our highest limit is 1K bytes. This is a
1840 	 * restriction on some ALPHA platforms with early revision
1841 	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1842 	 */
1843 	PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1844 	    BGE_PCI_READ_BNDRY_1024);
1845 #endif
1846 
1847 	/* Set the timer prescaler (always 66MHz) */
1848 	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1849 
1850 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1851 		DELAY(40);	/* XXX */
1852 
1853 		/* Put PHY into ready state */
1854 		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1855 		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1856 		DELAY(40);
1857 	}
1858 }
1859 
1860 int
1861 bge_blockinit(struct bge_softc *sc)
1862 {
1863 	volatile struct bge_rcb *rcb;
1864 	vaddr_t rcb_addr;
1865 	bge_hostaddr taddr;
1866 	u_int32_t dmactl, rdmareg, mimode, val;
1867 	int i, limit;
1868 
1869 	/*
1870 	 * Initialize the memory window pointer register so that
1871 	 * we can access the first 32K of internal NIC RAM. This will
1872 	 * allow us to set up the TX send ring RCBs and the RX return
1873 	 * ring RCBs, plus other things which live in NIC memory.
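 * Writing 0 to BGE_PCI_MEMWIN_BASEADDR below points the window at
 * the start of NIC RAM.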
1874 */ 1875 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 1876 1877 /* Configure mbuf memory pool */ 1878 if (!BGE_IS_5705_PLUS(sc)) { 1879 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1880 BGE_BUFFPOOL_1); 1881 1882 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1883 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1884 else 1885 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1886 1887 /* Configure DMA resource pool */ 1888 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1889 BGE_DMA_DESCRIPTORS); 1890 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1891 } 1892 1893 /* Configure mbuf pool watermarks */ 1894 /* new Broadcom docs strongly recommend these: */ 1895 if (BGE_IS_5717_PLUS(sc)) { 1896 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1897 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 1898 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 1899 } else if (BGE_IS_5705_PLUS(sc)) { 1900 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1901 1902 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1903 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1904 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1905 } else { 1906 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1907 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1908 } 1909 } else { 1910 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1911 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1912 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1913 } 1914 1915 /* Configure DMA resource watermarks */ 1916 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1917 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1918 1919 /* Enable buffer manager */ 1920 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; 1921 /* 1922 * Change the arbitration algorithm of TXMBUF read request to 1923 * round-robin instead of priority based for BCM5719. When 1924 * TXFIFO is almost empty, RDMA will hold its request until 1925 * TXFIFO is not almost empty. 1926 */ 1927 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 1928 val |= BGE_BMANMODE_NO_TX_UNDERRUN; 1929 CSR_WRITE_4(sc, BGE_BMAN_MODE, val); 1930 1931 /* Poll for buffer manager start indication */ 1932 for (i = 0; i < 2000; i++) { 1933 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1934 break; 1935 DELAY(10); 1936 } 1937 1938 if (i == 2000) { 1939 printf("%s: buffer manager failed to start\n", 1940 sc->bge_dev.dv_xname); 1941 return (ENXIO); 1942 } 1943 1944 /* Enable flow-through queues */ 1945 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 1946 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 1947 1948 /* Wait until queue initialization is complete */ 1949 for (i = 0; i < 2000; i++) { 1950 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 1951 break; 1952 DELAY(10); 1953 } 1954 1955 if (i == 2000) { 1956 printf("%s: flow-through queue init failed\n", 1957 sc->bge_dev.dv_xname); 1958 return (ENXIO); 1959 } 1960 1961 /* 1962 * Summary of rings supported by the controller: 1963 * 1964 * Standard Receive Producer Ring 1965 * - This ring is used to feed receive buffers for "standard" 1966 * sized frames (typically 1536 bytes) to the controller. 1967 * 1968 * Jumbo Receive Producer Ring 1969 * - This ring is used to feed receive buffers for jumbo sized 1970 * frames (i.e. anything bigger than the "standard" frames) 1971 * to the controller. 1972 * 1973 * Mini Receive Producer Ring 1974 * - This ring is used to feed receive buffers for "mini" 1975 * sized frames to the controller. 
1976 	 * - This feature required external memory for the controller
1977 	 *   but was never used in a production system. Should always
1978 	 *   be disabled.
1979 	 *
1980 	 * Receive Return Ring
1981 	 * - After the controller has placed an incoming frame into a
1982 	 *   receive buffer, that buffer is moved into a receive return
1983 	 *   ring. The driver is then responsible for passing the
1984 	 *   buffer up to the stack. Many versions of the controller
1985 	 *   support multiple RR rings.
1986 	 *
1987 	 * Send Ring
1988 	 * - This ring is used for outgoing frames. Many versions of
1989 	 *   the controller support multiple send rings.
1990 	 */
1991 
1992 	/* Initialize the standard RX ring control block */
1993 	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1994 	BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1995 	if (BGE_IS_5717_PLUS(sc)) {
1996 		/*
1997 		 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
1998 		 * Bits 15-2 : Maximum RX frame size
1999 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2000 		 * Bit 0     : Reserved
2001 		 */
2002 		rcb->bge_maxlen_flags =
2003 		    BGE_RCB_MAXLEN_FLAGS(512, ETHER_MAX_DIX_LEN << 2);
2004 	} else if (BGE_IS_5705_PLUS(sc)) {
2005 		/*
2006 		 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2007 		 * Bits 15-2 : Reserved (should be 0)
2008 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2009 		 * Bit 0     : Reserved
2010 		 */
2011 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2012 	} else {
2013 		/*
2014 		 * Ring size is always XXX entries
2015 		 * Bits 31-16: Maximum RX frame size
2016 		 * Bits 15-2 : Reserved (should be 0)
2017 		 * Bit 1     : 1 = Ring Disabled, 0 = Ring Enabled
2018 		 * Bit 0     : Reserved
2019 		 */
2020 		rcb->bge_maxlen_flags =
2021 		    BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
2022 	}
2023 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2024 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2025 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2026 		rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2027 	else
2028 		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2029 	/* Write the standard receive producer ring control block. */
2030 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2031 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2032 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2033 	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2034 
2035 	/* Reset the standard receive producer ring producer index. */
2036 	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2037 
2038 	/*
2039 	 * Initialize the Jumbo RX ring control block.
2040 	 * We set the 'ring disabled' bit in the flags
2041 	 * field until we're actually ready to start
2042 	 * using this ring (i.e. once we set the MTU
2043 	 * high enough to require it).
2044 */ 2045 if (sc->bge_flags & BGE_JUMBO_RING) { 2046 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2047 BGE_HOSTADDR(rcb->bge_hostaddr, 2048 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2049 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2050 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2051 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2052 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2053 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2054 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2055 else 2056 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2057 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2058 rcb->bge_hostaddr.bge_addr_hi); 2059 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2060 rcb->bge_hostaddr.bge_addr_lo); 2061 /* Program the jumbo receive producer ring RCB parameters. */ 2062 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2063 rcb->bge_maxlen_flags); 2064 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2065 /* Reset the jumbo receive producer ring producer index. */ 2066 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2067 } 2068 2069 /* Disable the mini receive producer ring RCB. */ 2070 if (BGE_IS_5700_FAMILY(sc)) { 2071 /* Set up dummy disabled mini ring RCB */ 2072 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2073 rcb->bge_maxlen_flags = 2074 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); 2075 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2076 rcb->bge_maxlen_flags); 2077 /* Reset the mini receive producer ring producer index. */ 2078 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2079 2080 /* XXX why? */ 2081 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2082 offsetof(struct bge_ring_data, bge_info), 2083 sizeof (struct bge_gib), 2084 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2085 } 2086 2087 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */ 2088 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2089 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 || 2090 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 || 2091 sc->bge_chipid == BGE_CHIPID_BCM5906_A2) 2092 CSR_WRITE_4(sc, BGE_ISO_PKT_TX, 2093 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2); 2094 } 2095 /* 2096 * The BD ring replenish thresholds control how often the 2097 * hardware fetches new BD's from the producer rings in host 2098 * memory. Setting the value too low on a busy system can 2099 * starve the hardware and reduce the throughput. 2100 * 2101 * Set the BD ring replenish thresholds. The recommended 2102 * values are 1/8th the number of descriptors allocated to 2103 * each ring, but since we try to avoid filling the entire 2104 * ring we set these to the minimal value of 8. This needs to 2105 * be done on several of the supported chip revisions anyway, 2106 * to work around HW bugs. 2107 */ 2108 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8); 2109 if (sc->bge_flags & BGE_JUMBO_RING) 2110 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8); 2111 2112 if (BGE_IS_5717_PLUS(sc)) { 2113 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2114 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2115 } 2116 2117 /* 2118 * Disable all send rings by setting the 'ring disabled' bit 2119 * in the flags field of all the TX send ring control blocks, 2120 * located in NIC memory. 2121 */ 2122 if (BGE_IS_5700_FAMILY(sc)) { 2123 /* 5700 to 5704 had 16 send rings. 
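	   That count is BGE_TX_RINGS_EXTSSRAM_MAX; later chips expose
	   far fewer, so the loop bound below is chosen per family.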
*/ 2124 limit = BGE_TX_RINGS_EXTSSRAM_MAX; 2125 } else if (BGE_IS_57765_PLUS(sc) || 2126 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2127 limit = 2; 2128 else if (BGE_IS_5717_PLUS(sc)) 2129 limit = 4; 2130 else 2131 limit = 1; 2132 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2133 for (i = 0; i < limit; i++) { 2134 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2135 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2136 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2137 rcb_addr += sizeof(struct bge_rcb); 2138 } 2139 2140 /* Configure send ring RCB 0 (we use only the first ring) */ 2141 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2142 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2143 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2144 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2145 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2146 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2147 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2148 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717); 2149 else 2150 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2151 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2152 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2153 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2154 2155 /* 2156 * Disable all receive return rings by setting the 2157 * 'ring disabled' bit in the flags field of all the receive 2158 * return ring control blocks, located in NIC memory. 2159 */ 2160 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2161 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2162 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2163 /* Should be 17, use 16 until we get an SRAM map. */ 2164 limit = 16; 2165 } else if (BGE_IS_5700_FAMILY(sc)) 2166 limit = BGE_RX_RINGS_MAX; 2167 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2168 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762 || 2169 BGE_IS_57765_PLUS(sc)) 2170 limit = 4; 2171 else 2172 limit = 1; 2173 /* Disable all receive return rings */ 2174 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2175 for (i = 0; i < limit; i++) { 2176 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2177 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2178 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2179 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2180 BGE_RCB_FLAG_RING_DISABLED)); 2181 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2182 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2183 (i * (sizeof(u_int64_t))), 0); 2184 rcb_addr += sizeof(struct bge_rcb); 2185 } 2186 2187 /* 2188 * Set up receive return ring 0. Note that the NIC address 2189 * for RX return rings is 0x0. The return rings live entirely 2190 * within the host, so the nicaddr field in the RCB isn't used. 
2191 */ 2192 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2193 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2194 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2195 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2196 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2197 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2198 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2199 2200 /* Set random backoff seed for TX */ 2201 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2202 (sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] + 2203 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] + 2204 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5]) & 2205 BGE_TX_BACKOFF_SEED_MASK); 2206 2207 /* Set inter-packet gap */ 2208 val = 0x2620; 2209 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2210 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2211 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & 2212 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); 2213 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); 2214 2215 /* 2216 * Specify which ring to use for packets that don't match 2217 * any RX rules. 2218 */ 2219 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2220 2221 /* 2222 * Configure number of RX lists. One interrupt distribution 2223 * list, sixteen active lists, one bad frames class. 2224 */ 2225 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2226 2227 /* Initialize RX list placement stats mask. */ 2228 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007BFFFF); 2229 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2230 2231 /* Disable host coalescing until we get it set up */ 2232 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2233 2234 /* Poll to make sure it's shut down. */ 2235 for (i = 0; i < 2000; i++) { 2236 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2237 break; 2238 DELAY(10); 2239 } 2240 2241 if (i == 2000) { 2242 printf("%s: host coalescing engine failed to idle\n", 2243 sc->bge_dev.dv_xname); 2244 return (ENXIO); 2245 } 2246 2247 /* Set up host coalescing defaults */ 2248 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2249 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2250 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2251 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2252 if (!(BGE_IS_5705_PLUS(sc))) { 2253 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2254 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2255 } 2256 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2257 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2258 2259 /* Set up address of statistics block */ 2260 if (!(BGE_IS_5705_PLUS(sc))) { 2261 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2262 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2263 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2264 2265 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2266 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2267 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2268 } 2269 2270 /* Set up address of status block */ 2271 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2272 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2273 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2274 2275 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2276 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2277 2278 /* Set up status block size. 
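	   BCM5700 revisions other than C0 need the full-sized block;
	   everything else uses only the first 32 bytes.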
*/ 2279 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2280 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2281 val = BGE_STATBLKSZ_FULL; 2282 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2283 } else { 2284 val = BGE_STATBLKSZ_32BYTE; 2285 bzero(&sc->bge_rdata->bge_status_block, 32); 2286 } 2287 2288 /* Turn on host coalescing state machine */ 2289 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2290 2291 /* Turn on RX BD completion state machine and enable attentions */ 2292 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2293 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); 2294 2295 /* Turn on RX list placement state machine */ 2296 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2297 2298 /* Turn on RX list selector state machine. */ 2299 if (!(BGE_IS_5705_PLUS(sc))) 2300 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2301 2302 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2303 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2304 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2305 BGE_MACMODE_FRMHDR_DMA_ENB; 2306 2307 if (sc->bge_flags & BGE_FIBER_TBI) 2308 val |= BGE_PORTMODE_TBI; 2309 else if (sc->bge_flags & BGE_FIBER_MII) 2310 val |= BGE_PORTMODE_GMII; 2311 else 2312 val |= BGE_PORTMODE_MII; 2313 2314 /* Allow APE to send/receive frames. */ 2315 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 2316 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 2317 2318 /* Turn on DMA, clear stats */ 2319 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2320 DELAY(40); 2321 2322 /* Set misc. local control, enable interrupts on attentions */ 2323 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 2324 2325 #ifdef notdef 2326 /* Assert GPIO pins for PHY reset */ 2327 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 2328 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 2329 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 2330 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 2331 #endif 2332 2333 /* Turn on DMA completion state machine */ 2334 if (!(BGE_IS_5705_PLUS(sc))) 2335 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2336 2337 val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 2338 2339 /* Enable host coalescing bug fix. */ 2340 if (BGE_IS_5755_PLUS(sc)) 2341 val |= BGE_WDMAMODE_STATUS_TAG_FIX; 2342 2343 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785) 2344 val |= BGE_WDMAMODE_BURST_ALL_DATA; 2345 2346 /* Turn on write DMA state machine */ 2347 CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 2348 DELAY(40); 2349 2350 val = BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS; 2351 2352 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717) 2353 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; 2354 2355 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2356 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2357 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2358 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2359 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2360 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2361 2362 if (sc->bge_flags & BGE_PCIE) 2363 val |= BGE_RDMAMODE_FIFO_LONG_BURST; 2364 2365 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 || 2366 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2367 val |= CSR_READ_4(sc, BGE_RDMA_MODE) & 2368 BGE_RDMAMODE_H2BNC_VLAN_DET; 2369 /* 2370 * Allow multiple outstanding read requests from 2371 * non-LSO read DMA engine. 
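 * (This clears the MULT_DMA_RD_DIS bit that was set for the
 * BCM5717 above.)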
2372 */ 2373 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; 2374 } 2375 2376 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2377 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2378 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2379 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 || 2380 BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { 2381 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) 2382 rdmareg = BGE_RDMA_RSRVCTRL_REG2; 2383 else 2384 rdmareg = BGE_RDMA_RSRVCTRL; 2385 dmactl = CSR_READ_4(sc, rdmareg); 2386 /* 2387 * Adjust tx margin to prevent TX data corruption and 2388 * fix internal FIFO overflow. 2389 */ 2390 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || 2391 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2392 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | 2393 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | 2394 BGE_RDMA_RSRVCTRL_TXMRGN_MASK); 2395 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | 2396 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | 2397 BGE_RDMA_RSRVCTRL_TXMRGN_320B; 2398 } 2399 /* 2400 * Enable fix for read DMA FIFO overruns. 2401 * The fix is to limit the number of RX BDs 2402 * the hardware would fetch at a time. 2403 */ 2404 CSR_WRITE_4(sc, rdmareg, dmactl | 2405 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 2406 } 2407 2408 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) { 2409 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2410 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2411 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2412 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2413 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 2414 /* 2415 * Allow 4KB burst length reads for non-LSO frames. 2416 * Enable 512B burst length reads for buffer descriptors. 2417 */ 2418 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 2419 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 2420 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 2421 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2422 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) { 2423 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, 2424 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | 2425 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | 2426 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 2427 } 2428 2429 CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 2430 DELAY(40); 2431 2432 if (sc->bge_flags & BGE_RDMA_BUG) { 2433 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { 2434 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); 2435 if ((val & 0xFFFF) > ETHER_MAX_LEN) 2436 break; 2437 if (((val >> 16) & 0xFFFF) > ETHER_MAX_LEN) 2438 break; 2439 } 2440 if (i != BGE_NUM_RDMA_CHANNELS / 2) { 2441 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); 2442 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) 2443 val |= BGE_RDMA_TX_LENGTH_WA_5719; 2444 else 2445 val |= BGE_RDMA_TX_LENGTH_WA_5720; 2446 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); 2447 } 2448 } 2449 2450 /* Turn on RX data completion state machine */ 2451 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2452 2453 /* Turn on RX BD initiator state machine */ 2454 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2455 2456 /* Turn on RX data and RX BD initiator state machine */ 2457 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2458 2459 /* Turn on Mbuf cluster free state machine */ 2460 if (!BGE_IS_5705_PLUS(sc)) 2461 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2462 2463 /* Turn on send BD completion state machine */ 2464 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2465 2466 /* Turn on send data completion state machine */ 2467 val = BGE_SDCMODE_ENABLE; 2468 if (BGE_ASICREV(sc->bge_chipid) 
== BGE_ASICREV_BCM5761) 2469 val |= BGE_SDCMODE_CDELAY; 2470 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2471 2472 /* Turn on send data initiator state machine */ 2473 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2474 2475 /* Turn on send BD initiator state machine */ 2476 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2477 2478 /* Turn on send BD selector state machine */ 2479 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2480 2481 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007BFFFF); 2482 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2483 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); 2484 2485 /* ack/clear link change events */ 2486 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2487 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2488 BGE_MACSTAT_LINK_CHANGED); 2489 2490 /* Enable PHY auto polling (for MII/GMII only) */ 2491 if (sc->bge_flags & BGE_FIBER_TBI) { 2492 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2493 } else { 2494 if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0) 2495 mimode = BGE_MIMODE_500KHZ_CONST; 2496 else 2497 mimode = BGE_MIMODE_BASE; 2498 if (BGE_IS_5700_FAMILY(sc) || 2499 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705) { 2500 mimode |= BGE_MIMODE_AUTOPOLL; 2501 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 2502 } 2503 mimode |= BGE_MIMODE_PHYADDR(sc->bge_phy_addr); 2504 CSR_WRITE_4(sc, BGE_MI_MODE, mimode); 2505 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 2506 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2507 BGE_EVTENB_MI_INTERRUPT); 2508 } 2509 2510 /* 2511 * Clear any pending link state attention. 2512 * Otherwise some link state change events may be lost until attention 2513 * is cleared by bge_intr() -> bge_link_upd() sequence. 2514 * It's not necessary on newer BCM chips - perhaps enabling link 2515 * state change attentions implies clearing pending attention. 2516 */ 2517 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 2518 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 2519 BGE_MACSTAT_LINK_CHANGED); 2520 2521 /* Enable link state change attentions. */ 2522 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 2523 2524 return (0); 2525 } 2526 2527 const struct bge_revision * 2528 bge_lookup_rev(u_int32_t chipid) 2529 { 2530 const struct bge_revision *br; 2531 2532 for (br = bge_revisions; br->br_name != NULL; br++) { 2533 if (br->br_chipid == chipid) 2534 return (br); 2535 } 2536 2537 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2538 if (br->br_chipid == BGE_ASICREV(chipid)) 2539 return (br); 2540 } 2541 2542 return (NULL); 2543 } 2544 2545 int 2546 bge_can_use_msi(struct bge_softc *sc) 2547 { 2548 int can_use_msi = 0; 2549 2550 switch (BGE_ASICREV(sc->bge_chipid)) { 2551 case BGE_ASICREV_BCM5714_A0: 2552 case BGE_ASICREV_BCM5714: 2553 /* 2554 * Apparently, MSI doesn't work when these chips are 2555 * configured in single-port mode. 2556 */ 2557 break; 2558 case BGE_ASICREV_BCM5750: 2559 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_AX && 2560 BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5750_BX) 2561 can_use_msi = 1; 2562 break; 2563 default: 2564 if (BGE_IS_575X_PLUS(sc)) 2565 can_use_msi = 1; 2566 } 2567 2568 return (can_use_msi); 2569 } 2570 2571 /* 2572 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2573 * against our list and return its name if we find a match. Note 2574 * that since the Broadcom controller contains VPD support, we 2575 * can get the device name string from the controller itself instead 2576 * of the compiled-in string. 
This is a little slow, but it guarantees 2577 * we'll always announce the right product name. 2578 */ 2579 int 2580 bge_probe(struct device *parent, void *match, void *aux) 2581 { 2582 return (pci_matchbyid(aux, bge_devices, nitems(bge_devices))); 2583 } 2584 2585 void 2586 bge_attach(struct device *parent, struct device *self, void *aux) 2587 { 2588 struct bge_softc *sc = (struct bge_softc *)self; 2589 struct pci_attach_args *pa = aux; 2590 pci_chipset_tag_t pc = pa->pa_pc; 2591 const struct bge_revision *br; 2592 pcireg_t pm_ctl, memtype, subid, reg; 2593 pci_intr_handle_t ih; 2594 const char *intrstr = NULL; 2595 int gotenaddr = 0; 2596 u_int32_t hwcfg = 0; 2597 u_int32_t mac_addr = 0; 2598 u_int32_t misccfg; 2599 struct ifnet *ifp; 2600 caddr_t kva; 2601 #ifdef __sparc64__ 2602 char name[32]; 2603 #endif 2604 2605 sc->bge_pa = *pa; 2606 2607 subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG); 2608 2609 /* 2610 * Map control/status registers. 2611 */ 2612 DPRINTFN(5, ("Map control/status regs\n")); 2613 2614 DPRINTFN(5, ("pci_mapreg_map\n")); 2615 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0); 2616 if (pci_mapreg_map(pa, BGE_PCI_BAR0, memtype, 0, &sc->bge_btag, 2617 &sc->bge_bhandle, NULL, &sc->bge_bsize, 0)) { 2618 printf(": can't find mem space\n"); 2619 return; 2620 } 2621 2622 /* 2623 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2624 * can clobber the chip's PCI config-space power control registers, 2625 * leaving the card in D3 powersave state. 2626 * We do not have memory-mapped registers in this state, 2627 * so force device into D0 state before starting initialization. 2628 */ 2629 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD); 2630 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2631 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2632 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2633 DELAY(1000); /* 27 usec is allegedly sufficient */ 2634 2635 /* 2636 * Save ASIC rev. 
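 * Normally the revision comes from the top bits of BGE_PCI_MISC_CTL;
 * chips that report BGE_ASICREV_USE_PRODID_REG redirect us to one of
 * the product ID registers decoded below.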
2637 */ 2638 sc->bge_chipid = 2639 (pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) 2640 >> BGE_PCIMISCCTL_ASICREV_SHIFT); 2641 2642 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) { 2643 switch (PCI_PRODUCT(pa->pa_id)) { 2644 case PCI_PRODUCT_BROADCOM_BCM5717: 2645 case PCI_PRODUCT_BROADCOM_BCM5718: 2646 case PCI_PRODUCT_BROADCOM_BCM5719: 2647 case PCI_PRODUCT_BROADCOM_BCM5720: 2648 case PCI_PRODUCT_BROADCOM_BCM5725: 2649 case PCI_PRODUCT_BROADCOM_BCM5727: 2650 case PCI_PRODUCT_BROADCOM_BCM5762: 2651 case PCI_PRODUCT_BROADCOM_BCM57764: 2652 case PCI_PRODUCT_BROADCOM_BCM57767: 2653 case PCI_PRODUCT_BROADCOM_BCM57787: 2654 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2655 BGE_PCI_GEN2_PRODID_ASICREV); 2656 break; 2657 case PCI_PRODUCT_BROADCOM_BCM57761: 2658 case PCI_PRODUCT_BROADCOM_BCM57762: 2659 case PCI_PRODUCT_BROADCOM_BCM57765: 2660 case PCI_PRODUCT_BROADCOM_BCM57766: 2661 case PCI_PRODUCT_BROADCOM_BCM57781: 2662 case PCI_PRODUCT_BROADCOM_BCM57782: 2663 case PCI_PRODUCT_BROADCOM_BCM57785: 2664 case PCI_PRODUCT_BROADCOM_BCM57786: 2665 case PCI_PRODUCT_BROADCOM_BCM57791: 2666 case PCI_PRODUCT_BROADCOM_BCM57795: 2667 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2668 BGE_PCI_GEN15_PRODID_ASICREV); 2669 break; 2670 default: 2671 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2672 BGE_PCI_PRODID_ASICREV); 2673 break; 2674 } 2675 } 2676 2677 sc->bge_phy_addr = bge_phy_addr(sc); 2678 2679 printf(", "); 2680 br = bge_lookup_rev(sc->bge_chipid); 2681 if (br == NULL) 2682 printf("unknown ASIC (0x%x)", sc->bge_chipid); 2683 else 2684 printf("%s (0x%x)", br->br_name, sc->bge_chipid); 2685 2686 /* 2687 * PCI Express or PCI-X controller check. 2688 */ 2689 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 2690 &sc->bge_expcap, NULL) != 0) { 2691 /* Extract supported maximum payload size. */ 2692 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap + 2693 PCI_PCIE_DCAP); 2694 sc->bge_mps = 128 << (reg & 0x7); 2695 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2696 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2697 sc->bge_expmrq = (fls(2048) - 8) << 12; 2698 else 2699 sc->bge_expmrq = (fls(4096) - 8) << 12; 2700 /* Disable PCIe Active State Power Management (ASPM). */ 2701 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 2702 sc->bge_expcap + PCI_PCIE_LCSR); 2703 reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1); 2704 pci_conf_write(pa->pa_pc, pa->pa_tag, 2705 sc->bge_expcap + PCI_PCIE_LCSR, reg); 2706 sc->bge_flags |= BGE_PCIE; 2707 } else { 2708 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) & 2709 BGE_PCISTATE_PCI_BUSMODE) == 0) 2710 sc->bge_flags |= BGE_PCIX; 2711 } 2712 2713 /* 2714 * SEEPROM check. 2715 */ 2716 #ifdef __sparc64__ 2717 /* 2718 * Onboard interfaces on UltraSPARC systems generally don't 2719 * have a SEEPROM fitted. These interfaces, and cards that 2720 * have FCode, are named "network" by the PROM, whereas cards 2721 * without FCode show up as "ethernet". Since we don't really 2722 * need the information from the SEEPROM on cards that have 2723 * FCode it's fine to pretend they don't have one. 2724 */ 2725 if (OF_getprop(PCITAG_NODE(pa->pa_tag), "name", name, 2726 sizeof(name)) > 0 && strcmp(name, "network") == 0) 2727 sc->bge_flags |= BGE_NO_EEPROM; 2728 #endif 2729 2730 /* Save chipset family. 
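	   The cases below deliberately cascade (note the FALLTHROUGH
	   markers) so that newer families inherit the flags of the
	   older ones they build on.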
 */
2731 	switch (BGE_ASICREV(sc->bge_chipid)) {
2732 	case BGE_ASICREV_BCM5762:
2733 	case BGE_ASICREV_BCM57765:
2734 	case BGE_ASICREV_BCM57766:
2735 		sc->bge_flags |= BGE_57765_PLUS;
2736 		/* FALLTHROUGH */
2737 	case BGE_ASICREV_BCM5717:
2738 	case BGE_ASICREV_BCM5719:
2739 	case BGE_ASICREV_BCM5720:
2740 		sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
2741 		    BGE_5705_PLUS | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING |
2742 		    BGE_JUMBO_FRAME;
2743 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2744 		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2745 			/*
2746 			 * Enable the workaround for the DMA engine's
2747 			 * miscalculation of TXMBUF available space.
2748 			 */
2749 			sc->bge_flags |= BGE_RDMA_BUG;
2750 
2751 			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 &&
2752 			    sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
2753 				/* Jumbo frame on BCM5719 A0 does not work. */
2754 				sc->bge_flags &= ~(BGE_JUMBO_CAPABLE |
2755 				    BGE_JUMBO_RING | BGE_JUMBO_FRAME);
2756 			}
2757 		}
2758 		break;
2759 	case BGE_ASICREV_BCM5755:
2760 	case BGE_ASICREV_BCM5761:
2761 	case BGE_ASICREV_BCM5784:
2762 	case BGE_ASICREV_BCM5785:
2763 	case BGE_ASICREV_BCM5787:
2764 	case BGE_ASICREV_BCM57780:
2765 		sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
2766 		break;
2767 	case BGE_ASICREV_BCM5700:
2768 	case BGE_ASICREV_BCM5701:
2769 	case BGE_ASICREV_BCM5703:
2770 	case BGE_ASICREV_BCM5704:
2771 		sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_RING;
2772 		break;
2773 	case BGE_ASICREV_BCM5714_A0:
2774 	case BGE_ASICREV_BCM5780:
2775 	case BGE_ASICREV_BCM5714:
2776 		sc->bge_flags |= BGE_5714_FAMILY | BGE_JUMBO_CAPABLE | BGE_JUMBO_STD;
2777 		/* FALLTHROUGH */
2778 	case BGE_ASICREV_BCM5750:
2779 	case BGE_ASICREV_BCM5752:
2780 	case BGE_ASICREV_BCM5906:
2781 		sc->bge_flags |= BGE_575X_PLUS;
2782 		/* FALLTHROUGH */
2783 	case BGE_ASICREV_BCM5705:
2784 		sc->bge_flags |= BGE_5705_PLUS;
2785 		break;
2786 	}
2787 
2788 	if (sc->bge_flags & BGE_JUMBO_STD)
2789 		sc->bge_rx_std_len = BGE_JLEN;
2790 	else
2791 		sc->bge_rx_std_len = MCLBYTES;
2792 
2793 	/*
2794 	 * When using the BCM5701 in PCI-X mode, data corruption has
2795 	 * been observed in the first few bytes of some received packets.
2796 	 * Aligning the packet buffer in memory eliminates the corruption.
2797 	 * Unfortunately, this misaligns the packet payloads. On platforms
2798 	 * which do not support unaligned accesses, we will realign the
2799 	 * payloads by copying the received packets.
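 * (That copy is the __STRICT_ALIGNMENT handling in bge_rxeof().)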
2800 */ 2801 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 2802 sc->bge_flags & BGE_PCIX) 2803 sc->bge_flags |= BGE_RX_ALIGNBUG; 2804 2805 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2806 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 2807 PCI_VENDOR(subid) == DELL_VENDORID) 2808 sc->bge_phy_flags |= BGE_PHY_NO_3LED; 2809 2810 misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 2811 misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 2812 2813 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2814 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 2815 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 2816 sc->bge_flags |= BGE_IS_5788; 2817 2818 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 2819 (misccfg == 0x4000 || misccfg == 0x8000)) || 2820 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2821 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2822 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 2823 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2824 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2825 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2826 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 2827 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 2828 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2829 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 2830 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 2831 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 2832 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2833 sc->bge_phy_flags |= BGE_PHY_10_100_ONLY; 2834 2835 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2836 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2837 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2838 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2839 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2840 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; 2841 2842 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2843 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2844 sc->bge_phy_flags |= BGE_PHY_CRC_BUG; 2845 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 2846 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 2847 sc->bge_phy_flags |= BGE_PHY_ADC_BUG; 2848 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2849 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG; 2850 2851 if ((BGE_IS_5705_PLUS(sc)) && 2852 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 2853 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 2854 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 2855 !BGE_IS_5717_PLUS(sc)) { 2856 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2857 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2858 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2859 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 2860 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 2861 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 2862 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG; 2863 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 2864 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM; 2865 } else 2866 sc->bge_phy_flags |= BGE_PHY_BER_BUG; 2867 } 2868 2869 /* Identify chips with APE processor. 
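	   The APE is a separate management processor on these chips;
	   its registers and shared memory are reached through BAR2,
	   which is mapped below.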
 */
2870 	switch (BGE_ASICREV(sc->bge_chipid)) {
2871 	case BGE_ASICREV_BCM5717:
2872 	case BGE_ASICREV_BCM5719:
2873 	case BGE_ASICREV_BCM5720:
2874 	case BGE_ASICREV_BCM5761:
2875 	case BGE_ASICREV_BCM5762:
2876 		sc->bge_flags |= BGE_APE;
2877 		break;
2878 	}
2879 
2880 	/* Chips with APE need BAR2 access for APE registers/memory. */
2881 	if ((sc->bge_flags & BGE_APE) != 0) {
2882 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
2883 		if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
2884 		    &sc->bge_apetag, &sc->bge_apehandle, NULL,
2885 		    &sc->bge_apesize, 0)) {
2886 			printf(": couldn't map BAR2 memory\n");
2887 			goto fail_1;
2888 		}
2889 
2890 		/* Enable APE register/memory access by host driver. */
2891 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2892 		reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
2893 		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
2894 		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
2895 		pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
2896 
2897 		bge_ape_lock_init(sc);
2898 		bge_ape_read_fw_ver(sc);
2899 	}
2900 
2901 	/* Identify the chips that use a CPMU. */
2902 	if (BGE_IS_5717_PLUS(sc) ||
2903 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2904 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2905 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2906 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2907 		sc->bge_flags |= BGE_CPMU_PRESENT;
2908 
2909 	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI,
2910 	    &sc->bge_msicap, NULL)) {
2911 		if (bge_can_use_msi(sc) == 0)
2912 			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
2913 	}
2914 
2915 	DPRINTFN(5, ("pci_intr_map\n"));
2916 	if (pci_intr_map_msi(pa, &ih) == 0)
2917 		sc->bge_flags |= BGE_MSI;
2918 	else if (pci_intr_map(pa, &ih)) {
2919 		printf(": couldn't map interrupt\n");
2920 		goto fail_1;
2921 	}
2922 
2923 	/*
2924 	 * All controllers except the BCM5700 support tagged status, but
2925 	 * we use tagged status only for the MSI case on BCM5717.
2926 	 * Otherwise MSI on BCM5717 does not work.
2927 	 */
2928 	if (BGE_IS_5717_PLUS(sc) && sc->bge_flags & BGE_MSI)
2929 		sc->bge_flags |= BGE_TAGGED_STATUS;
2930 
2931 	DPRINTFN(5, ("pci_intr_string\n"));
2932 	intrstr = pci_intr_string(pc, ih);
2933 
2934 	/* Try to reset the chip. */
2935 	DPRINTFN(5, ("bge_reset\n"));
2936 	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
2937 	bge_reset(sc);
2938 
2939 	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
2940 	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
2941 
2942 	bge_chipinit(sc);
2943 
2944 #if defined(__sparc64__) || defined(__HAVE_FDT)
2945 	if (!gotenaddr && PCITAG_NODE(pa->pa_tag)) {
2946 		if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
2947 		    sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
2948 			gotenaddr = 1;
2949 	}
2950 #endif
2951 
2952 	/*
2953 	 * Get station address from the EEPROM.
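 * Several sources are tried in turn: NIC SRAM (fronted by what
 * appears to be an ASCII "HK" signature, 0x484b), NVRAM, and
 * finally a raw EEPROM read.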
2954 */ 2955 if (!gotenaddr) { 2956 mac_addr = bge_readmem_ind(sc, 0x0c14); 2957 if ((mac_addr >> 16) == 0x484b) { 2958 sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8); 2959 sc->arpcom.ac_enaddr[1] = (u_char)mac_addr; 2960 mac_addr = bge_readmem_ind(sc, 0x0c18); 2961 sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24); 2962 sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16); 2963 sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8); 2964 sc->arpcom.ac_enaddr[5] = (u_char)mac_addr; 2965 gotenaddr = 1; 2966 } 2967 } 2968 if (!gotenaddr) { 2969 int mac_offset = BGE_EE_MAC_OFFSET; 2970 2971 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2972 mac_offset = BGE_EE_MAC_OFFSET_5906; 2973 2974 if (bge_read_nvram(sc, (caddr_t)&sc->arpcom.ac_enaddr, 2975 mac_offset + 2, ETHER_ADDR_LEN) == 0) 2976 gotenaddr = 1; 2977 } 2978 if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) { 2979 if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 2980 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0) 2981 gotenaddr = 1; 2982 } 2983 2984 #ifdef __sparc64__ 2985 if (!gotenaddr) { 2986 extern void myetheraddr(u_char *); 2987 2988 myetheraddr(sc->arpcom.ac_enaddr); 2989 gotenaddr = 1; 2990 } 2991 #endif 2992 2993 if (!gotenaddr) { 2994 printf(": failed to read station address\n"); 2995 goto fail_2; 2996 } 2997 2998 /* Allocate the general information block and ring buffers. */ 2999 sc->bge_dmatag = pa->pa_dmat; 3000 DPRINTFN(5, ("bus_dmamem_alloc\n")); 3001 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3002 PAGE_SIZE, 0, &sc->bge_ring_seg, 1, &sc->bge_ring_nseg, 3003 BUS_DMA_NOWAIT)) { 3004 printf(": can't alloc rx buffers\n"); 3005 goto fail_2; 3006 } 3007 DPRINTFN(5, ("bus_dmamem_map\n")); 3008 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3009 sc->bge_ring_nseg, sizeof(struct bge_ring_data), &kva, 3010 BUS_DMA_NOWAIT)) { 3011 printf(": can't map dma buffers (%lu bytes)\n", 3012 sizeof(struct bge_ring_data)); 3013 goto fail_3; 3014 } 3015 DPRINTFN(5, ("bus_dmamap_create\n")); 3016 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3017 sizeof(struct bge_ring_data), 0, 3018 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 3019 printf(": can't create dma map\n"); 3020 goto fail_4; 3021 } 3022 DPRINTFN(5, ("bus_dmamap_load\n")); 3023 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3024 sizeof(struct bge_ring_data), NULL, 3025 BUS_DMA_NOWAIT)) { 3026 goto fail_5; 3027 } 3028 3029 DPRINTFN(5, ("bzero\n")); 3030 sc->bge_rdata = (struct bge_ring_data *)kva; 3031 3032 bzero(sc->bge_rdata, sizeof(struct bge_ring_data)); 3033 3034 /* Set default tuneable values. */ 3035 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3036 sc->bge_rx_coal_ticks = 150; 3037 sc->bge_rx_max_coal_bds = 64; 3038 sc->bge_tx_coal_ticks = 300; 3039 sc->bge_tx_max_coal_bds = 400; 3040 3041 /* 5705 limits RX return ring to 512 entries. 
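	   The 5700 family and 5717+ chips get the full-sized ring.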
*/ 3042 if (BGE_IS_5700_FAMILY(sc) || BGE_IS_5717_PLUS(sc)) 3043 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3044 else 3045 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3046 3047 mtx_init(&sc->bge_kstat_mtx, IPL_SOFTCLOCK); 3048 #if NKSTAT > 0 3049 if (BGE_IS_5705_PLUS(sc)) 3050 bge_kstat_attach(sc); 3051 #endif 3052 3053 /* Set up ifnet structure */ 3054 ifp = &sc->arpcom.ac_if; 3055 ifp->if_softc = sc; 3056 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3057 ifp->if_xflags = IFXF_MPSAFE; 3058 ifp->if_ioctl = bge_ioctl; 3059 ifp->if_qstart = bge_start; 3060 ifp->if_watchdog = bge_watchdog; 3061 ifq_init_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1); 3062 3063 DPRINTFN(5, ("bcopy\n")); 3064 bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 3065 3066 ifp->if_capabilities = IFCAP_VLAN_MTU; 3067 3068 #if NVLAN > 0 3069 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 3070 #endif 3071 3072 /* 3073 * 5700 B0 chips do not support checksumming correctly due 3074 * to hardware bugs. 3075 * 3076 * It seems all controllers have a bug that can generate UDP 3077 * datagrams with a checksum value 0 when TX UDP checksum 3078 * offloading is enabled. Generating UDP checksum value 0 is 3079 * a violation of RFC 768. 3080 */ 3081 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3082 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4; 3083 3084 if (BGE_IS_JUMBO_CAPABLE(sc)) 3085 ifp->if_hardmtu = BGE_JUMBO_MTU; 3086 3087 /* 3088 * Do MII setup. 3089 */ 3090 DPRINTFN(5, ("mii setup\n")); 3091 sc->bge_mii.mii_ifp = ifp; 3092 sc->bge_mii.mii_readreg = bge_miibus_readreg; 3093 sc->bge_mii.mii_writereg = bge_miibus_writereg; 3094 sc->bge_mii.mii_statchg = bge_miibus_statchg; 3095 3096 /* 3097 * Figure out what sort of media we have by checking the hardware 3098 * config word in the first 32K of internal NIC memory, or fall back to 3099 * examining the EEPROM if necessary. Note: on some BCM5700 cards, 3100 * this value seems to be unset. If that's the case, we have to rely on 3101 * identifying the NIC by its PCI subsystem ID, as we do below for the 3102 * SysKonnect SK-9D41. 3103 */ 3104 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 3105 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 3106 else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 3107 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 3108 sizeof(hwcfg))) { 3109 printf(": failed to read media type\n"); 3110 goto fail_6; 3111 } 3112 hwcfg = ntohl(hwcfg); 3113 } 3114 3115 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 3116 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 || 3117 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3118 if (BGE_IS_5700_FAMILY(sc)) 3119 sc->bge_flags |= BGE_FIBER_TBI; 3120 else 3121 sc->bge_flags |= BGE_FIBER_MII; 3122 } 3123 3124 /* Take advantage of single-shot MSI. */ 3125 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_MSI) 3126 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & 3127 ~BGE_MSIMODE_ONE_SHOT_DISABLE); 3128 3129 /* Hookup IRQ last. */ 3130 DPRINTFN(5, ("pci_intr_establish\n")); 3131 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE, 3132 bge_intr, sc, sc->bge_dev.dv_xname); 3133 if (sc->bge_intrhand == NULL) { 3134 printf(": couldn't establish interrupt"); 3135 if (intrstr != NULL) 3136 printf(" at %s", intrstr); 3137 printf("\n"); 3138 goto fail_6; 3139 } 3140 3141 /* 3142 * A Broadcom chip was detected. Inform the world. 
3143 */ 3144 printf(": %s, address %s\n", intrstr, 3145 ether_sprintf(sc->arpcom.ac_enaddr)); 3146 3147 if (sc->bge_flags & BGE_FIBER_TBI) { 3148 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 3149 bge_ifmedia_sts); 3150 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 3151 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 3152 0, NULL); 3153 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 3154 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 3155 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 3156 } else { 3157 int mii_flags; 3158 3159 /* 3160 * Do transceiver setup. 3161 */ 3162 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 3163 bge_ifmedia_sts); 3164 mii_flags = MIIF_DOPAUSE; 3165 if (sc->bge_flags & BGE_FIBER_MII) 3166 mii_flags |= MIIF_HAVEFIBER; 3167 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff, 3168 sc->bge_phy_addr, MII_OFFSET_ANY, mii_flags); 3169 3170 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) { 3171 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname); 3172 ifmedia_add(&sc->bge_mii.mii_media, 3173 IFM_ETHER|IFM_MANUAL, 0, NULL); 3174 ifmedia_set(&sc->bge_mii.mii_media, 3175 IFM_ETHER|IFM_MANUAL); 3176 } else 3177 ifmedia_set(&sc->bge_mii.mii_media, 3178 IFM_ETHER|IFM_AUTO); 3179 } 3180 3181 /* 3182 * Call MI attach routine. 3183 */ 3184 if_attach(ifp); 3185 ether_ifattach(ifp); 3186 3187 timeout_set(&sc->bge_timeout, bge_tick, sc); 3188 timeout_set(&sc->bge_rxtimeout, bge_rxtick, sc); 3189 timeout_set(&sc->bge_rxtimeout_jumbo, bge_rxtick_jumbo, sc); 3190 return; 3191 3192 fail_6: 3193 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); 3194 3195 fail_5: 3196 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3197 3198 fail_4: 3199 bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata, 3200 sizeof(struct bge_ring_data)); 3201 3202 fail_3: 3203 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg); 3204 3205 fail_2: 3206 if ((sc->bge_flags & BGE_APE) != 0) 3207 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, 3208 sc->bge_apesize); 3209 3210 fail_1: 3211 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); 3212 } 3213 3214 int 3215 bge_detach(struct device *self, int flags) 3216 { 3217 struct bge_softc *sc = (struct bge_softc *)self; 3218 struct ifnet *ifp = &sc->arpcom.ac_if; 3219 3220 bge_stop(sc, 1); 3221 3222 if (sc->bge_intrhand) 3223 pci_intr_disestablish(sc->bge_pa.pa_pc, sc->bge_intrhand); 3224 3225 /* Detach any PHYs we might have. */ 3226 if (LIST_FIRST(&sc->bge_mii.mii_phys) != NULL) 3227 mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); 3228 3229 /* Delete any remaining media. 
*/ 3230 ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY); 3231 3232 ether_ifdetach(ifp); 3233 if_detach(ifp); 3234 3235 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); 3236 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3237 bus_dmamem_unmap(sc->bge_dmatag, (caddr_t)sc->bge_rdata, 3238 sizeof(struct bge_ring_data)); 3239 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_nseg); 3240 3241 if ((sc->bge_flags & BGE_APE) != 0) 3242 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, 3243 sc->bge_apesize); 3244 3245 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); 3246 return (0); 3247 } 3248 3249 int 3250 bge_activate(struct device *self, int act) 3251 { 3252 struct bge_softc *sc = (struct bge_softc *)self; 3253 struct ifnet *ifp = &sc->arpcom.ac_if; 3254 3255 switch (act) { 3256 case DVACT_SUSPEND: 3257 if (ifp->if_flags & IFF_RUNNING) 3258 bge_stop(sc, 0); 3259 break; 3260 case DVACT_RESUME: 3261 if (ifp->if_flags & IFF_UP) 3262 bge_init(sc); 3263 break; 3264 } 3265 return (0); 3266 } 3267 3268 void 3269 bge_reset(struct bge_softc *sc) 3270 { 3271 struct pci_attach_args *pa = &sc->bge_pa; 3272 pcireg_t cachesize, command, devctl; 3273 u_int32_t reset, mac_mode, mac_mode_mask, val; 3274 void (*write_op)(struct bge_softc *, int, int); 3275 int i; 3276 3277 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 3278 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 3279 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 3280 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 3281 3282 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 3283 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) { 3284 if (sc->bge_flags & BGE_PCIE) 3285 write_op = bge_writembx; 3286 else 3287 write_op = bge_writemem_ind; 3288 } else 3289 write_op = bge_writereg_ind; 3290 3291 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 && 3292 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701 && 3293 !(sc->bge_flags & BGE_NO_EEPROM)) { 3294 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 3295 for (i = 0; i < 8000; i++) { 3296 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & 3297 BGE_NVRAMSWARB_GNT1) 3298 break; 3299 DELAY(20); 3300 } 3301 if (i == 8000) 3302 printf("%s: nvram lock timed out\n", 3303 sc->bge_dev.dv_xname); 3304 } 3305 /* Take APE lock when performing reset. */ 3306 bge_ape_lock(sc, BGE_APE_LOCK_GRC); 3307 3308 /* Save some important PCI state. */ 3309 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ); 3310 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD); 3311 3312 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 3313 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3314 BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW); 3315 3316 /* Disable fastboot on controllers that support it. */ 3317 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 3318 BGE_IS_5755_PLUS(sc)) 3319 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 3320 3321 /* 3322 * Write the magic number to SRAM at offset 0xB50. 3323 * When firmware finishes its initialization it will 3324 * write ~BGE_SRAM_FW_MB_MAGIC to the same location. 
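 * The post-reset polling loop further down waits for exactly that
 * value.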
3325 */ 3326 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 3327 3328 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 3329 3330 if (sc->bge_flags & BGE_PCIE) { 3331 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3332 !BGE_IS_5717_PLUS(sc)) { 3333 if (CSR_READ_4(sc, 0x7e2c) == 0x60) { 3334 /* PCI Express 1.0 system */ 3335 CSR_WRITE_4(sc, 0x7e2c, 0x20); 3336 } 3337 } 3338 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3339 /* 3340 * Prevent PCI Express link training 3341 * during global reset. 3342 */ 3343 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 3344 reset |= (1<<29); 3345 } 3346 } 3347 3348 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3349 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 3350 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 3351 val | BGE_VCPU_STATUS_DRV_RESET); 3352 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 3353 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 3354 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 3355 3356 sc->bge_flags |= BGE_NO_EEPROM; 3357 } 3358 3359 /* 3360 * Set GPHY Power Down Override to leave GPHY 3361 * powered up in D0 uninitialized. 3362 */ 3363 if (BGE_IS_5705_PLUS(sc) && 3364 (sc->bge_flags & BGE_CPMU_PRESENT) == 0) 3365 reset |= BGE_MISCCFG_KEEP_GPHY_POWER; 3366 3367 /* Issue global reset */ 3368 write_op(sc, BGE_MISC_CFG, reset); 3369 3370 if (sc->bge_flags & BGE_PCIE) 3371 DELAY(100 * 1000); 3372 else 3373 DELAY(1000); 3374 3375 if (sc->bge_flags & BGE_PCIE) { 3376 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3377 pcireg_t v; 3378 3379 DELAY(500000); /* wait for link training to complete */ 3380 v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4); 3381 pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15)); 3382 } 3383 3384 devctl = pci_conf_read(pa->pa_pc, pa->pa_tag, sc->bge_expcap + 3385 PCI_PCIE_DCSR); 3386 /* Clear enable no snoop and disable relaxed ordering. */ 3387 devctl &= ~(PCI_PCIE_DCSR_ERO | PCI_PCIE_DCSR_ENS); 3388 /* Set PCI Express max payload size. */ 3389 devctl = (devctl & ~PCI_PCIE_DCSR_MPS) | sc->bge_expmrq; 3390 /* Clear error status. */ 3391 devctl |= PCI_PCIE_DCSR_CEE | PCI_PCIE_DCSR_NFE | 3392 PCI_PCIE_DCSR_FEE | PCI_PCIE_DCSR_URE; 3393 pci_conf_write(pa->pa_pc, pa->pa_tag, sc->bge_expcap + 3394 PCI_PCIE_DCSR, devctl); 3395 } 3396 3397 /* Reset some of the PCI state that got zapped by reset */ 3398 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL, 3399 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3400 BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW); 3401 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 3402 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && 3403 (sc->bge_flags & BGE_PCIX) != 0) 3404 val |= BGE_PCISTATE_RETRY_SAME_DMA; 3405 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 3406 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 3407 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 3408 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 3409 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val); 3410 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize); 3411 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command); 3412 3413 /* Re-enable MSI, if necessary, and enable memory arbiter. */ 3414 if (BGE_IS_5714_FAMILY(sc)) { 3415 /* This chip disables MSI on reset. 
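	   Both the enable bit in the MSI capability and the chip's own
	   MSI mode register have to be switched back on by hand.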

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP | BGE_PCIMISCCTL_PCISTATE_RW);
	val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
	    (sc->bge_flags & BGE_PCIX) != 0)
		val |= BGE_PCISTATE_RETRY_SAME_DMA;
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
		val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
		    BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
		    BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, val);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);

	/* Re-enable MSI, if necessary, and enable memory arbiter. */
	if (BGE_IS_5714_FAMILY(sc)) {
		/* This chip disables MSI on reset. */
		if (sc->bge_flags & BGE_MSI) {
			val = pci_conf_read(pa->pa_pc, pa->pa_tag,
			    sc->bge_msicap + PCI_MSI_MC);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    sc->bge_msicap + PCI_MSI_MC,
			    val | PCI_MSI_MC_MSIE);
			val = CSR_READ_4(sc, BGE_MSI_MODE);
			CSR_WRITE_4(sc, BGE_MSI_MODE,
			    val | BGE_MSIMODE_ENABLE);
		}
		val = CSR_READ_4(sc, BGE_MARB_MODE);
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
	} else
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));

	val = CSR_READ_4(sc, BGE_MAC_MODE);
	val = (val & ~mac_mode_mask) | mac_mode;
	CSR_WRITE_4(sc, BGE_MAC_MODE, val);
	DELAY(40);

	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}

		if (i >= BGE_TIMEOUT)
			printf("%s: reset timed out\n", sc->bge_dev.dv_xname);
	} else {
		/*
		 * Poll until we see 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete. We expect this to fail if no SEEPROM
		 * is fitted.
		 */
		for (i = 0; i < BGE_TIMEOUT * 10; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if ((i >= BGE_TIMEOUT * 10) &&
		    (!(sc->bge_flags & BGE_NO_EEPROM)))
			printf("%s: firmware handshake timed out\n",
			    sc->bge_dev.dv_xname);
		/* BCM57765 A0 needs additional time before accessing. */
		if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
			DELAY(10 * 1000);	/* XXX */
	}

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to ensure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_flags & BGE_FIBER_TBI &&
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
		val = CSR_READ_4(sc, BGE_SERDES_CFG);
		val = (val & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
	}

	if (sc->bge_flags & BGE_PCIE &&
	    !BGE_IS_5717_PLUS(sc) &&
	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
		/* Enable Data FIFO protection. */
		val = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, val | (1<<25));
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
}
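
/*
 * The RX path works on a producer/consumer model: bge_fill_rx_ring_std()
 * and bge_fill_rx_ring_jumbo() post empty mbufs on the standard and jumbo
 * producer rings, and the chip hands completed frames back to the host on
 * the RX return ring. bge_rxeof() consumes return ring entries until its
 * saved consumer index catches up with the producer index reported in the
 * status block.
 */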
3503 * 3504 * Note: we have to be able to handle two possibilities here: 3505 * 1) the frame is from the jumbo receive ring 3506 * 2) the frame is from the standard receive ring 3507 */ 3508 3509 void 3510 bge_rxeof(struct bge_softc *sc) 3511 { 3512 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 3513 struct ifnet *ifp; 3514 uint16_t rx_prod, rx_cons; 3515 int stdcnt = 0, jumbocnt = 0; 3516 bus_dmamap_t dmamap; 3517 bus_addr_t offset, toff; 3518 bus_size_t tlen; 3519 int tosync; 3520 int livelocked; 3521 3522 rx_cons = sc->bge_rx_saved_considx; 3523 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 3524 3525 /* Nothing to do */ 3526 if (rx_cons == rx_prod) 3527 return; 3528 3529 ifp = &sc->arpcom.ac_if; 3530 3531 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3532 offsetof(struct bge_ring_data, bge_status_block), 3533 sizeof (struct bge_status_block), 3534 BUS_DMASYNC_POSTREAD); 3535 3536 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 3537 tosync = rx_prod - rx_cons; 3538 3539 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 3540 3541 if (tosync < 0) { 3542 tlen = (sc->bge_return_ring_cnt - rx_cons) * 3543 sizeof (struct bge_rx_bd); 3544 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3545 toff, tlen, BUS_DMASYNC_POSTREAD); 3546 tosync = -tosync; 3547 } 3548 3549 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3550 offset, tosync * sizeof (struct bge_rx_bd), 3551 BUS_DMASYNC_POSTREAD); 3552 3553 while (rx_cons != rx_prod) { 3554 struct bge_rx_bd *cur_rx; 3555 u_int32_t rxidx; 3556 struct mbuf *m = NULL; 3557 3558 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 3559 3560 rxidx = cur_rx->bge_idx; 3561 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 3562 3563 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3564 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3565 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3566 3567 jumbocnt++; 3568 3569 dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx]; 3570 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3571 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3572 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3573 3574 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3575 m_freem(m); 3576 continue; 3577 } 3578 } else { 3579 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3580 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3581 3582 stdcnt++; 3583 3584 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3585 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3586 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3587 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3588 3589 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3590 m_freem(m); 3591 continue; 3592 } 3593 } 3594 3595 #ifdef __STRICT_ALIGNMENT 3596 /* 3597 * The i386 allows unaligned accesses, but for other 3598 * platforms we must make sure the payload is aligned. 
	if (tosync < 0) {
		tlen = (sc->bge_return_ring_cnt - rx_cons) *
		    sizeof (struct bge_rx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_POSTREAD);

	while (rx_cons != rx_prod) {
		struct bge_rx_bd *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;

		cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons];

		rxidx = cur_rx->bge_idx;
		BGE_INC(rx_cons, sc->bge_return_ring_cnt);

		if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
			m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
			sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;

			jumbocnt++;

			dmamap = sc->bge_cdata.bge_rx_jumbo_map[rxidx];
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				m_freem(m);
				continue;
			}
		} else {
			m = sc->bge_cdata.bge_rx_std_chain[rxidx];
			sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;

			stdcnt++;

			dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
				m_freem(m);
				continue;
			}
		}

#ifdef __STRICT_ALIGNMENT
		/*
		 * The i386 allows unaligned accesses, but for other
		 * platforms we must make sure the payload is aligned.
		 */
		if (sc->bge_flags & BGE_RX_ALIGNBUG) {
			bcopy(m->m_data, m->m_data + ETHER_ALIGN,
			    cur_rx->bge_len);
			m->m_data += ETHER_ALIGN;
		}
#endif
		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;

		bge_rxcsum(sc, cur_rx, m);

#if NVLAN > 0
		if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING &&
		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
			m->m_pkthdr.ether_vtag = cur_rx->bge_vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);
	}

	sc->bge_rx_saved_considx = rx_cons;
	bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);

	livelocked = ifiq_input(&ifp->if_rcv, &ml);
	if (stdcnt) {
		if_rxr_put(&sc->bge_std_ring, stdcnt);
		if (livelocked)
			if_rxr_livelocked(&sc->bge_std_ring);
		bge_fill_rx_ring_std(sc);
	}
	if (jumbocnt) {
		if_rxr_put(&sc->bge_jumbo_ring, jumbocnt);
		if (livelocked)
			if_rxr_livelocked(&sc->bge_jumbo_ring);
		bge_fill_rx_ring_jumbo(sc);
	}
}

void
bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
{
	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
		/*
		 * 5700 B0 chips do not support checksumming correctly due
		 * to hardware bugs.
		 */
		return;
	} else if (BGE_IS_5717_PLUS(sc)) {
		if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
			if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
			    (cur_rx->bge_error_flag &
			    BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

			if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
				m->m_pkthdr.csum_flags |=
				    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
			}
		}
	} else {
		if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM &&
		    cur_rx->bge_ip_csum == 0xFFFF)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    m->m_pkthdr.len >= ETHER_MIN_NOPAD &&
		    cur_rx->bge_tcp_udp_csum == 0xFFFF) {
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK|M_UDP_CSUM_IN_OK;
		}
	}
}
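
/*
 * Transmit completion. Everything between the driver's saved consumer
 * index and the TX consumer index reported in the status block has been
 * sent, so the corresponding mbufs and DMA maps can be released and the
 * descriptor slots returned to the bge_txcnt credit count.
 */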
void
bge_txeof(struct bge_softc *sc)
{
	struct bge_tx_bd *cur_tx = NULL;
	struct ifnet *ifp;
	bus_dmamap_t dmamap;
	bus_addr_t offset, toff;
	bus_size_t tlen;
	int tosync, freed, txcnt;
	u_int32_t cons, newcons;
	struct mbuf *m;

	cons = sc->bge_tx_saved_considx;
	newcons = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx;

	/* Nothing to do */
	if (cons == newcons)
		return;

	ifp = &sc->arpcom.ac_if;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD);

	offset = offsetof(struct bge_ring_data, bge_tx_ring);
	tosync = newcons - cons;

	toff = offset + (cons * sizeof (struct bge_tx_bd));

	if (tosync < 0) {
		tlen = (BGE_TX_RING_CNT - cons) * sizeof (struct bge_tx_bd);
		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		tosync = -tosync;
	}

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offset, tosync * sizeof (struct bge_tx_bd),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	freed = 0;
	while (cons != newcons) {
		cur_tx = &sc->bge_rdata->bge_tx_ring[cons];
		m = sc->bge_cdata.bge_tx_chain[cons];
		if (m != NULL) {
			dmamap = sc->bge_cdata.bge_tx_map[cons];

			sc->bge_cdata.bge_tx_chain[cons] = NULL;
			sc->bge_cdata.bge_tx_map[cons] = NULL;
			bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_dmatag, dmamap);

			m_freem(m);
		}
		freed++;
		BGE_INC(cons, BGE_TX_RING_CNT);
	}

	txcnt = atomic_sub_int_nv(&sc->bge_txcnt, freed);

	sc->bge_tx_saved_considx = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
	else if (txcnt == 0)
		ifp->if_timer = 0;
}
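
/*
 * Interrupt handshake: with tagged status blocks the tag of the last
 * status update we processed is written back to the IRQ mailbox, which
 * acknowledges exactly that work; on older chips the UPDATED flag is
 * checked instead, and the mailbox is written with 1 to mask further
 * interrupts and with 0 at the end to unmask them again. In both cases
 * the PCI state register says whether a shared interrupt was really
 * ours before we claim it.
 */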
int
bge_intr(void *xsc)
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t statusword, statustag;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* read status word from status block */
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	statusword = sc->bge_rdata->bge_status_block.bge_status;
	statustag = sc->bge_rdata->bge_status_block.bge_status_tag << 24;

	if (sc->bge_flags & BGE_TAGGED_STATUS) {
		if (sc->bge_lasttag == statustag &&
		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
		    BGE_PCISTATE_INTR_NOT_ACTIVE))
			return (0);
		sc->bge_lasttag = statustag;
	} else {
		if (!(statusword & BGE_STATFLAG_UPDATED) &&
		    (CSR_READ_4(sc, BGE_PCI_PCISTATE) &
		    BGE_PCISTATE_INTR_NOT_ACTIVE))
			return (0);
		/* Ack interrupt and stop others from occurring. */
		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
		statustag = 0;
	}

	/* clear status word */
	sc->bge_rdata->bge_status_block.bge_status = 0;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_status_block),
	    sizeof (struct bge_status_block),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
	    BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) {
		KERNEL_LOCK();
		bge_link_upd(sc);
		KERNEL_UNLOCK();
	}

	/* Re-enable interrupts. */
	bge_writembx(sc, BGE_MBX_IRQ0_LO, statustag);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		bge_rxeof(sc);

		/* Check TX ring producer/consumer */
		bge_txeof(sc);
	}

	return (1);
}
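
/*
 * Periodic housekeeping: runs once a second from a timeout, pulls the
 * statistics counters and, depending on the media, either forces a link
 * event (TBI, where autopolling is unavailable) or lets the MII layer
 * tick the PHY.
 */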
void
bge_tick(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct mii_data *mii = &sc->bge_mii;
	int s;

	s = splnet();

	if (BGE_IS_5705_PLUS(sc)) {
		mtx_enter(&sc->bge_kstat_mtx);
		bge_stats_update_regs(sc);
		mtx_leave(&sc->bge_kstat_mtx);
	} else
		bge_stats_update(sc);

	if (sc->bge_flags & BGE_FIBER_TBI) {
		/*
		 * Since auto-polling can't be used in TBI mode, we have to
		 * poll link status manually: register a pending link event
		 * and trigger an interrupt.
		 */
		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	} else {
		/*
		 * Do not touch the PHY if we have link up. This could break
		 * IPMI/ASF mode or produce extra input errors
		 * (extra input errors were reported for bcm5701 & bcm5704).
		 */
		if (!BGE_STS_BIT(sc, BGE_STS_LINK))
			mii_tick(mii);
	}

	timeout_add_sec(&sc->bge_timeout, 1);

	splx(s);
}
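
/*
 * On 5705 and newer controllers the statistics live in device registers
 * rather than in a DMAed statistics block. The register counters appear
 * to be cleared on read: every consumer below accumulates deltas instead
 * of storing absolute values.
 */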
4009 */ 4010 4011 /* if we'd make prev a runt, just move all of its data. */ 4012 #ifdef DEBUG 4013 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 4014 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 4015 #endif 4016 if ((prev->m_len - shortfall) < 8) 4017 shortfall = prev->m_len; 4018 4019 newprevlen = prev->m_len - shortfall; 4020 4021 MGET(n, M_NOWAIT, MT_DATA); 4022 if (n == NULL) 4023 return (ENOBUFS); 4024 KASSERT(m->m_len + shortfall < MLEN 4025 /*, 4026 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 4027 4028 /* first copy the data we're stealing from prev */ 4029 bcopy(prev->m_data + newprevlen, n->m_data, shortfall); 4030 4031 /* update prev->m_len accordingly */ 4032 prev->m_len -= shortfall; 4033 4034 /* copy data from runt m */ 4035 bcopy(m->m_data, n->m_data + shortfall, m->m_len); 4036 4037 /* n holds what we stole from prev, plus m */ 4038 n->m_len = shortfall + m->m_len; 4039 4040 /* stitch n into chain and free m */ 4041 n->m_next = m->m_next; 4042 prev->m_next = n; 4043 /* KASSERT(m->m_next == NULL); */ 4044 m->m_next = NULL; 4045 m_free(m); 4046 m = n; /* for continuing loop */ 4047 } 4048 } 4049 return (0); 4050 } 4051 4052 /* 4053 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 4054 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 4055 * but when such padded frames employ the bge IP/TCP checksum offload, 4056 * the hardware checksum assist gives incorrect results (possibly 4057 * from incorporating its own padding into the UDP/TCP checksum; who knows). 4058 * If we pad such runts with zeros, the onboard checksum comes out correct. 4059 */ 4060 int 4061 bge_cksum_pad(struct mbuf *m) 4062 { 4063 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 4064 struct mbuf *last; 4065 4066 /* If there's only the packet-header and we can pad there, use it. */ 4067 if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) { 4068 last = m; 4069 } else { 4070 /* 4071 * Walk packet chain to find last mbuf. We will either 4072 * pad there, or append a new mbuf and pad it. 4073 */ 4074 for (last = m; last->m_next != NULL; last = last->m_next) 4075 ; 4076 if (m_trailingspace(last) < padlen) { 4077 /* Allocate new empty mbuf, pad it. Compact later. */ 4078 struct mbuf *n; 4079 4080 MGET(n, M_DONTWAIT, MT_DATA); 4081 if (n == NULL) 4082 return (ENOBUFS); 4083 n->m_len = 0; 4084 last->m_next = n; 4085 last = n; 4086 } 4087 } 4088 4089 /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 4090 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 4091 last->m_len += padlen; 4092 m->m_pkthdr.len += padlen; 4093 4094 return (0); 4095 } 4096 4097 /* 4098 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 4099 * pointers to descriptors. 
void
bge_stats_update(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
	u_int32_t cnt;

#define READ_STAT(sc, stats, stat) \
	CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))

	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
	ifp->if_collisions += (u_int32_t)(cnt - sc->bge_tx_collisions);
	sc->bge_tx_collisions = cnt;

	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
	sc->bge_rx_overruns = cnt;
	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrors);
	sc->bge_rx_inerrors = cnt;
	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
	ifp->if_ierrors += (u_int32_t)(cnt - sc->bge_rx_discards);
	sc->bge_rx_discards = cnt;

	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
	ifp->if_oerrors += (u_int32_t)(cnt - sc->bge_tx_discards);
	sc->bge_tx_discards = cnt;

#undef READ_STAT
}
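
/*
 * Example of what the compaction below does: a chain with mbuf lengths
 * (14, 3, 40) contains a 3-byte runt the 5700_BX DMA engine cannot
 * handle. If the 14-byte mbuf has 3 bytes of trailing space, the runt
 * is folded into it, giving (17, 40); failing that, 5 bytes are pulled
 * up from the 40-byte successor to give (14, 8, 35). A runt at the tail
 * of the chain instead steals bytes from its predecessor into a freshly
 * allocated mbuf.
 */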

/*
 * Compact outbound packets to avoid a bug with DMA segments shorter than
 * 8 bytes.
 */
int
bge_compact_dma_runt(struct mbuf *pkt)
{
	struct mbuf *m, *prev, *n = NULL;
	int totlen, newprevlen;

	prev = NULL;
	totlen = 0;

	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;

		totlen += mlen;
		if (mlen == 0)
			continue;
		if (mlen >= 8)
			continue;

		/* If we get here, the mbuf data is too small for the DMA
		 * engine. Try to fix it by shuffling data to prev or next
		 * in the chain. If that fails, do a compacting deep-copy
		 * of the whole chain.
		 */

		/* Internal frag. If it fits in prev, copy it there. */
		if (prev && m_trailingspace(prev) >= m->m_len) {
			bcopy(m->m_data, prev->m_data + prev->m_len, mlen);
			prev->m_len += mlen;
			m->m_len = 0;
			/* XXX stitch chain */
			prev->m_next = m_free(m);
			m = prev;
			continue;
		} else if (m->m_next != NULL &&
		    m_trailingspace(m) >= shortfall &&
		    m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and next has enough data; pull up. */

			bcopy(m->m_next->m_data, m->m_data + m->m_len,
			    shortfall);
			m->m_len += shortfall;
			m->m_next->m_len -= shortfall;
			m->m_next->m_data += shortfall;
		} else {
			/* Got a runt at the very end of the packet.
			 * Borrow data from the tail of the preceding mbuf and
			 * update its length in-place. (The original data is
			 * still valid, so we can do this even if prev is not
			 * writable.)
			 */

			/* If we'd make prev a runt, just move all of its data. */
#ifdef DEBUG
			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
#endif
			if ((prev->m_len - shortfall) < 8)
				shortfall = prev->m_len;

			newprevlen = prev->m_len - shortfall;

			MGET(n, M_NOWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			KASSERT(m->m_len + shortfall < MLEN
			    /*,
			    ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);

			/* first copy the data we're stealing from prev */
			bcopy(prev->m_data + newprevlen, n->m_data, shortfall);

			/* update prev->m_len accordingly */
			prev->m_len -= shortfall;

			/* copy data from runt m */
			bcopy(m->m_data, n->m_data + shortfall, m->m_len);

			/* n holds what we stole from prev, plus m */
			n->m_len = shortfall + m->m_len;

			/* stitch n into chain and free m */
			n->m_next = m->m_next;
			prev->m_next = n;
			/* KASSERT(m->m_next == NULL); */
			m->m_next = NULL;
			m_free(m);
			m = n;	/* for continuing loop */
		}
	}
	return (0);
}
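
/*
 * Example: a UDP packet with a 1-byte payload is a 43-byte frame, well
 * below ETHER_MIN_NOPAD (60). With TCP/UDP checksum offload enabled,
 * such a runt must be zero-padded by bge_cksum_pad() before transmission
 * or the hardware-computed checksum comes out wrong.
 */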

/*
 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
 * but when such padded frames employ the bge IP/TCP checksum offload,
 * the hardware checksum assist gives incorrect results (possibly
 * from incorporating its own padding into the UDP/TCP checksum; who knows).
 * If we pad such runts with zeros, the onboard checksum comes out correct.
 */
int
bge_cksum_pad(struct mbuf *m)
{
	int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
	struct mbuf *last;

	/* If there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && m_trailingspace(m) >= padlen) {
		last = m;
	} else {
		/*
		 * Walk packet chain to find last mbuf. We will either
		 * pad there, or append a new mbuf and pad it.
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)
			;
		if (m_trailingspace(last) < padlen) {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;

			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOBUFS);
			n->m_len = 0;
			last->m_next = n;
			last = n;
		}
	}

	/* Now zero the pad area, to avoid the bge cksum-assist bug. */
	memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;

	return (0);
}
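
/*
 * The encapsulation path below loads the mbuf chain into a DMA map,
 * retrying once through m_defrag() if the chain has too many segments
 * (EFBIG), then fills one send BD per DMA segment and marks the last
 * one with BGE_TXBDFLAG_END. On failure the map is unloaded and the
 * caller frees the mbuf.
 */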

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
bge_encap(struct bge_softc *sc, struct mbuf *m, int *txinc)
{
	struct bge_tx_bd *f = NULL;
	u_int32_t frag, cur;
	u_int16_t csum_flags = 0;
	bus_dmamap_t dmamap;
	int i = 0;

	cur = frag = (sc->bge_tx_prodidx + *txinc) % BGE_TX_RING_CNT;

	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
		if (m->m_pkthdr.csum_flags &
		    (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) {
			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
			if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
			    bge_cksum_pad(m) != 0)
				return (ENOBUFS);
		}
	}

	if (sc->bge_flags & BGE_JUMBO_FRAME &&
	    m->m_pkthdr.len > ETHER_MAX_LEN)
		csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;

	if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_5700_BX)
		goto doit;

	/*
	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
	 * less than eight bytes. If we encounter a teeny mbuf
	 * at the end of a chain, we can pad. Otherwise, copy.
	 */
	if (bge_compact_dma_runt(m) != 0)
		return (ENOBUFS);

doit:
	dmamap = sc->bge_txdma[cur];

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	switch (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
	    BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m,
		    BUS_DMA_NOWAIT) == 0)
			break;

		/* FALLTHROUGH */
	default:
		return (ENOBUFS);
	}

	for (i = 0; i < dmamap->dm_nsegs; i++) {
		f = &sc->bge_rdata->bge_tx_ring[frag];
		if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
			break;
		BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
		f->bge_len = dmamap->dm_segs[i].ds_len;
		f->bge_flags = csum_flags;
		f->bge_vlan_tag = 0;
#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
			f->bge_vlan_tag = m->m_pkthdr.ether_vtag;
		}
#endif
		cur = frag;
		BGE_INC(frag, BGE_TX_RING_CNT);
	}

	if (i < dmamap->dm_nsegs)
		goto fail_unload;

	if (frag == sc->bge_tx_saved_considx)
		goto fail_unload;

	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
	sc->bge_cdata.bge_tx_chain[cur] = m;
	sc->bge_cdata.bge_tx_map[cur] = dmamap;

	*txinc += dmamap->dm_nsegs;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->bge_dmatag, dmamap);

	return (ENOBUFS);
}
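
/*
 * Note the headroom check at the top of the loop below: a packet may
 * consume up to BGE_NTXSEG descriptors, so the queue is marked oactive
 * while BGE_NTXSEG + 16 free slots still remain, presumably to keep
 * slack in the ring rather than have bge_encap() fail mid-stream.
 */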

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
void
bge_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct bge_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int txinc;

	if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
		ifq_purge(ifq);
		return;
	}

	txinc = 0;
	while (1) {
		/* Check if we have enough free send BDs. */
		if (sc->bge_txcnt + txinc + BGE_NTXSEG + 16 >=
		    BGE_TX_RING_CNT) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		if (bge_encap(sc, m, &txinc) != 0) {
			m_freem(m);
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (txinc != 0) {
		/* Transmit */
		sc->bge_tx_prodidx = (sc->bge_tx_prodidx + txinc) %
		    BGE_TX_RING_CNT;
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
		if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
			bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO,
			    sc->bge_tx_prodidx);

		atomic_add_int(&sc->bge_txcnt, txinc);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}
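
/*
 * Bring-up order matters here: the chip is stopped and reset, the block
 * and ring control state is reprogrammed via bge_chipinit() and
 * bge_blockinit(), the RX/TX rings are populated, the MAC address and
 * RX filters are loaded, and only then are interrupts unmasked and the
 * transmitter and receiver enabled.
 */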
void
bge_init(void *xsc)
{
	struct bge_softc *sc = xsc;
	struct ifnet *ifp;
	u_int16_t *m;
	u_int32_t mode;
	int s;

	s = splnet();

	ifp = &sc->arpcom.ac_if;

	/* Cancel pending I/O and flush buffers. */
	bge_stop(sc, 0);
	bge_sig_pre_reset(sc, BGE_RESET_START);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_START);
	bge_sig_post_reset(sc, BGE_RESET_START);

	bge_chipinit(sc);

	/*
	 * Init the various state machines, ring
	 * control blocks and firmware.
	 */
	if (bge_blockinit(sc)) {
		printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
		splx(s);
		return;
	}

	/* Specify MRU. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		CSR_WRITE_4(sc, BGE_RX_MTU,
		    BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
	else
		CSR_WRITE_4(sc, BGE_RX_MTU,
		    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
	CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));

	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
		/* Disable hardware decapsulation of VLAN frames. */
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	}

	/* Program promiscuous mode and multicast filters. */
	bge_iff(sc);

	/* Init RX ring. */
	bge_init_rx_ring_std(sc);

	/*
	 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
	 * memory to ensure that the chip has in fact read the first
	 * entry of the ring.
	 */
	if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
		u_int32_t v, i;
		for (i = 0; i < 10; i++) {
			DELAY(20);
			v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
			if (v == (MCLBYTES - ETHER_ALIGN))
				break;
		}
		if (i == 10)
			printf("%s: 5705 A0 chip failed to load RX ring\n",
			    sc->bge_dev.dv_xname);
	}

	/* Init Jumbo RX ring. */
	if (sc->bge_flags & BGE_JUMBO_RING)
		bge_init_rx_ring_jumbo(sc);

	/* Init our RX return ring index */
	sc->bge_rx_saved_considx = 0;

	/* Init our RX/TX stat counters. */
	sc->bge_tx_collisions = 0;
	sc->bge_rx_discards = 0;
	sc->bge_rx_inerrors = 0;
	sc->bge_rx_overruns = 0;
	sc->bge_tx_discards = 0;

	/* Init TX ring. */
	bge_init_tx_ring(sc);

	/* Enable TX MAC state machine lockup fix. */
	mode = CSR_READ_4(sc, BGE_TX_MODE);
	if (BGE_IS_5755_PLUS(sc) ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762) {
		mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
		mode |= CSR_READ_4(sc, BGE_TX_MODE) &
		    (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
	}

	/* Turn on transmitter */
	CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
	DELAY(100);

	mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (BGE_IS_5755_PLUS(sc))
		mode |= BGE_RXMODE_IPV6_ENABLE;
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5762)
		mode |= BGE_RXMODE_IPV4_FRAG_FIX;

	/* Turn on receiver */
	CSR_WRITE_4(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
	DELAY(10);

	/*
	 * Set the number of good frames to receive after RX MBUF
	 * Low Watermark has been reached. After the RX MAC receives
	 * this number of frames, it will drop subsequent incoming
	 * frames until the MBUF High Watermark is reached.
	 */
	if (BGE_IS_57765_PLUS(sc))
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
	else
		CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);

	/* Tell firmware we're alive. */
	BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Enable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
	BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);

	bge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	splx(s);

	timeout_add_sec(&sc->bge_timeout, 1);
}
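
/*
 * Media handling comes in two flavours: TBI (1000baseX fiber) is driven
 * directly by this driver through sc->bge_ifmedia, while copper PHYs go
 * through the generic MII layer (sc->bge_mii).
 */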

/*
 * Set media options.
 */
int
bge_ifmedia_upd(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	struct ifmedia *ifm = &sc->bge_ifmedia;

	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FIBER_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
				u_int32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			DELAY(40);
			break;
		default:
			return (EINVAL);
		}
		/* XXX 802.3x flow control for 1000BASE-SX */
		return (0);
	}

	BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we are not getting any further interrupts
	 * for link state changes and thus will not UP the link and
	 * not be able to send in bge_start. The only way to get
	 * things working was to receive a packet and get a RX intr.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    sc->bge_flags & BGE_IS_5788)
		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
	else
		BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);

	return (0);
}

/*
 * Report current media status.
 */
void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	if (sc->bge_flags & BGE_FIBER_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED) {
			ifmr->ifm_status |= IFM_ACTIVE;
		} else {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	}

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
	    sc->bge_flowflags;
}
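
/*
 * Standard ioctl entry point. Address and flag changes that only
 * require the RX filter to be reprogrammed come back as ENETRESET and
 * are handled with a call to bge_iff() instead of a full reinit.
 */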
int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			bge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				bge_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				bge_stop(sc, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGE_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	case SIOCGIFRXR:
		error = bge_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			bge_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

int
bge_rxrinfo(struct bge_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info ifr[2];
	u_int n = 0;

	memset(ifr, 0, sizeof(ifr));

	if (ISSET(sc->bge_flags, BGE_RXRING_VALID)) {
		ifr[n].ifr_size = sc->bge_rx_std_len;
		strlcpy(ifr[n].ifr_name, "std", sizeof(ifr[n].ifr_name));
		ifr[n].ifr_info = sc->bge_std_ring;

		n++;
	}

	if (ISSET(sc->bge_flags, BGE_JUMBO_RXRING_VALID)) {
		ifr[n].ifr_size = BGE_JLEN;
		strlcpy(ifr[n].ifr_name, "jumbo", sizeof(ifr[n].ifr_name));
		ifr[n].ifr_info = sc->bge_jumbo_ring;

		n++;
	}

	return (if_rxr_info_ioctl(ifri, n, ifr));
}

void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);

	bge_init(sc);

	ifp->if_oerrors++;
}

void
bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
{
	int i;

	BGE_CLRBIT(sc, reg, bit);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
		delay(100);
	}

	DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
	    sc->bge_dev.dv_xname, (u_long) reg, bit));
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
bge_stop(struct bge_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmedia_entry *ifm;
	struct mii_data *mii;
	int mtmp, itmp;

	timeout_del(&sc->bge_timeout);
	timeout_del(&sc->bge_rxtimeout);
	timeout_del(&sc->bge_rxtimeout_jumbo);

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	if (!softonly) {
		/*
		 * Tell firmware we're shutting down.
		 */
		/* bge_stop_fw(sc); */
		bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

		/*
		 * Disable all of the receiver blocks
		 */
		bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
		bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
		bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
		bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

		/*
		 * Disable all of the transmit blocks
		 */
		bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
		bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
		bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
		bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
		bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
		bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

		/*
		 * Shut down all of the memory managers and related
		 * state machines.
		 */
		bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
		bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
		if (BGE_IS_5700_FAMILY(sc))
			bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
		CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

		if (!BGE_IS_5705_PLUS(sc)) {
			bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
			bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
		}

		bge_reset(sc);
		bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
		bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

		/*
		 * Tell firmware we're shutting down.
		 */
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	}

	intr_barrier(sc->bge_intrhand);
	ifq_barrier(&ifp->if_snd);

	ifq_clr_oactive(&ifp->if_snd);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (sc->bge_flags & BGE_JUMBO_RING)
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if (!(sc->bge_flags & BGE_FIBER_TBI)) {
		mii = &sc->bge_mii;
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER|IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	if (!softonly) {
		/* Clear MAC's link state (PHY may still have link UP). */
		BGE_STS_CLRBIT(sc, BGE_STS_LINK);
	}
}
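
/*
 * Link state handling uses one of four strategies, depending on the
 * hardware: MI interrupts (BCM5700), the TBI PCS sync bit for fiber,
 * the MI status register when autopolling is active (since some chips
 * always set the link-changed bit in the status word), or plain
 * mii_pollstat() for everything else.
 */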
void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->bge_mii;
	u_int32_t status;
	int link;

	/* Clear 'pending link event' flag */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(&sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR);
			bge_miibus_writereg(&sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FIBER_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				status = CSR_READ_4(sc, BGE_MAC_MODE);
				link = (status & BGE_MACMODE_HALF_DUPLEX) ?
				    LINK_STATE_HALF_DUPLEX :
				    LINK_STATE_FULL_DUPLEX;
				ifp->if_baudrate = IF_Gbps(1);
				if (ifp->if_link_state != link) {
					ifp->if_link_state = link;
					if_link_state_change(ifp);
				}
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			link = LINK_STATE_DOWN;
			ifp->if_baudrate = 0;
			if (ifp->if_link_state != link) {
				ifp->if_link_state = link;
				if_link_state_change(ifp);
			}
		}
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have the
		 * BGE_STATFLAG_LINKSTATE_CHANGED bit in the status word
		 * always set. Work around this bug by reading the PHY
		 * link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	} else {
		/*
		 * For controllers that call mii_tick, we have to poll
		 * link status.
		 */
		mii_pollstat(mii);
	}

	/* Clear the attention */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}

#if NKSTAT > 0

struct bge_stat {
	char			name[KSTAT_KV_NAMELEN];
	enum kstat_kv_unit	unit;
	bus_size_t		reg;
};

#define MACREG(_f) \
	BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, _f)

static const struct bge_stat bge_kstat_tpl[] = {
	/* MAC stats */
	[bge_stat_out_octets] = { "out octets", KSTAT_KV_U_BYTES,
	    MACREG(ifHCOutOctets) },
	[bge_stat_collisions] = { "collisions", KSTAT_KV_U_NONE, 0 },
	[bge_stat_xon_sent] = { "xon sent", KSTAT_KV_U_NONE,
	    MACREG(outXonSent) },
	[bge_stat_xoff_sent] = { "xoff sent", KSTAT_KV_U_NONE,
	    MACREG(outXoffSent) },
	[bge_stat_xmit_errors] = { "xmit errors", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsInternalMacTransmitErrors) },
	[bge_stat_coll_frames] = { "coll frames", KSTAT_KV_U_PACKETS,
	    MACREG(dot3StatsSingleCollisionFrames) },
	[bge_stat_multicoll_frames] = { "multicoll frames", KSTAT_KV_U_PACKETS,
	    MACREG(dot3StatsMultipleCollisionFrames) },
	[bge_stat_deferred_xmit] = { "deferred xmit", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsDeferredTransmissions) },
	[bge_stat_excess_coll] = { "excess coll", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsExcessiveCollisions) },
	[bge_stat_late_coll] = { "late coll", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsLateCollisions) },
	[bge_stat_out_ucast_pkt] = { "out ucast pkts", KSTAT_KV_U_PACKETS, 0 },
	[bge_stat_out_mcast_pkt] = { "out mcast pkts", KSTAT_KV_U_PACKETS, 0 },
	[bge_stat_out_bcast_pkt] = { "out bcast pkts", KSTAT_KV_U_PACKETS, 0 },
	[bge_stat_in_octets] = { "in octets", KSTAT_KV_U_BYTES,
	    MACREG(ifHCInOctets) },
	[bge_stat_fragments] = { "fragments", KSTAT_KV_U_NONE,
	    MACREG(etherStatsFragments) },
	[bge_stat_in_ucast_pkt] = { "in ucast pkts", KSTAT_KV_U_PACKETS,
	    MACREG(ifHCInUcastPkts) },
	[bge_stat_in_mcast_pkt] = { "in mcast pkts", KSTAT_KV_U_PACKETS,
	    MACREG(ifHCInMulticastPkts) },
	[bge_stat_in_bcast_pkt] = { "in bcast pkts", KSTAT_KV_U_PACKETS,
	    MACREG(ifHCInBroadcastPkts) },
	[bge_stat_fcs_errors] = { "FCS errors", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsFCSErrors) },
	[bge_stat_align_errors] = { "align errors", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsAlignmentErrors) },
	[bge_stat_xon_rcvd] = { "xon rcvd", KSTAT_KV_U_NONE,
	    MACREG(xonPauseFramesReceived) },
	[bge_stat_xoff_rcvd] = { "xoff rcvd", KSTAT_KV_U_NONE,
	    MACREG(xoffPauseFramesReceived) },
	[bge_stat_ctrl_frame_rcvd] = { "ctrlframes rcvd", KSTAT_KV_U_NONE,
	    MACREG(macControlFramesReceived) },
	[bge_stat_xoff_entered] = { "xoff entered", KSTAT_KV_U_NONE,
	    MACREG(xoffStateEntered) },
	[bge_stat_too_long_frames] = { "too long frames", KSTAT_KV_U_NONE,
	    MACREG(dot3StatsFramesTooLong) },
	[bge_stat_jabbers] = { "jabbers", KSTAT_KV_U_NONE,
	    MACREG(etherStatsJabbers) },
	[bge_stat_too_short_pkts] = { "too short pkts", KSTAT_KV_U_NONE,
	    MACREG(etherStatsUndersizePkts) },

	/* Send Data Initiator stats */
	[bge_stat_dma_rq_full] = { "DMA RQ full", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_DMA_RQ_FULL },
	[bge_stat_dma_hprq_full] = { "DMA HPRQ full", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL },
	[bge_stat_sdc_queue_full] = { "SDC queue full", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_SDC_QUEUE_FULL },
	[bge_stat_nic_sendprod_set] = { "sendprod set", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_NIC_SENDPROD_SET },
	[bge_stat_status_updated] = { "stats updated", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_STATS_UPDATED },
	[bge_stat_irqs] = { "irqs", KSTAT_KV_U_NONE, BGE_LOCSTATS_IRQS },
	[bge_stat_avoided_irqs] = { "avoided irqs", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_AVOIDED_IRQS },
	[bge_stat_tx_thresh_hit] = { "tx thresh hit", KSTAT_KV_U_NONE,
	    BGE_LOCSTATS_TX_THRESH_HIT },

	/* Receive List Placement stats */
	[bge_stat_filtdrop] = { "filtdrop", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_FILTDROP },
	[bge_stat_dma_wrq_full] = { "DMA WRQ full", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_DMA_WRQ_FULL },
	[bge_stat_dma_hpwrq_full] = { "DMA HPWRQ full", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL },
	[bge_stat_out_of_bds] = { "out of BDs", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_OUT_OF_BDS },
	[bge_stat_if_in_drops] = { "if in drops", KSTAT_KV_U_NONE, 0 },
	[bge_stat_if_in_errors] = { "if in errors", KSTAT_KV_U_NONE, 0 },
	[bge_stat_rx_thresh_hit] = { "rx thresh hit", KSTAT_KV_U_NONE,
	    BGE_RXLP_LOCSTAT_RXTHRESH_HIT },
};

int
bge_kstat_read(struct kstat *ks)
{
	struct bge_softc *sc = ks->ks_softc;
	struct kstat_kv *kvs = ks->ks_data;
	int i;

	bge_stats_update_regs(sc);

	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
		if (bge_kstat_tpl[i].reg != 0)
			kstat_kv_u32(kvs) += CSR_READ_4(sc,
			    bge_kstat_tpl[i].reg);
		kvs++;
	}

	getnanouptime(&ks->ks_updated);
	return 0;
}

void
bge_kstat_attach(struct bge_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	int i;

	ks = kstat_create(sc->bge_dev.dv_xname, 0, "bge-stats", 0,
	    KSTAT_T_KV, 0);
	if (ks == NULL)
		return;

	kvs = mallocarray(nitems(bge_kstat_tpl), sizeof(*kvs), M_DEVBUF,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < nitems(bge_kstat_tpl); i++) {
		const struct bge_stat *tpl = &bge_kstat_tpl[i];
		kstat_kv_unit_init(&kvs[i], tpl->name, KSTAT_KV_T_UINT32,
		    tpl->unit);
	}

	kstat_set_mutex(ks, &sc->bge_kstat_mtx);
	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = nitems(bge_kstat_tpl) * sizeof(*kvs);
	ks->ks_read = bge_kstat_read;

	sc->bge_kstat = ks;
	kstat_install(ks);
}
#endif /* NKSTAT > 0 */