/*	$NetBSD: if_bge.c,v 1.236 2013/04/11 11:24:07 msaitoh Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.236 2013/04/11 11:24:07 msaitoh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <sys/rnd.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <net/bpf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 16,   1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,   2 },
	{ 50,   4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
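
/*
 * Illustrative arithmetic (not from the original authors): at 1 Gbit/s
 * a stream of 1500-byte frames arrives at roughly 81k frames/s, so the
 * most aggressive pair { 598, 46 } caps the receive-interrupt rate near
 * 81000 / 46 ~= 1800 interrupts/s, while { 16, 1 } effectively disables
 * mitigation (one interrupt per received frame).
 */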

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static uint32_t bge_chipid(const struct pci_attach_args *pa);
static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static int bge_detach(device_t, int);
static void bge_release_resources(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *);
static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
static void bge_stop(struct ifnet *, int);
static void bge_watchdog(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, void *, int, int);
static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, void *, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
    bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_phy_addr(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
static void bge_writembx_flush(struct bge_softc *, int, int);
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static void bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(struct ifnet *);

#define BGE_RESET_SHUTDOWN	0
#define BGE_RESET_START		1
#define BGE_RESET_SUSPEND	2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_wait_for_event_ack(struct bge_softc *);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);
static void bge_sysctl_init(struct bge_softc *);
static int bge_sysctl_verify(SYSCTLFN_PROTO);

static void bge_ape_lock_init(struct bge_softc *);
static void bge_ape_read_fw_ver(struct bge_softc *);
static int bge_ape_lock(struct bge_softc *, int);
static void bge_ape_unlock(struct bge_softc *, int);
static void bge_ape_send_event(struct bge_softc *, uint32_t);
static void bge_ape_driver_state_change(struct bge_softc *, int);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM.  We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1003,
	  "Altima AC1003 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet",
	},
	{ PCI_VENDOR_APPLE,
	  PCI_PRODUCT_APPLE_BCM5701,
	  "APPLE BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5719,
	  "Broadcom BCM5719 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5724,
	  "Broadcom BCM5724 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750M,
	  "Broadcom BCM5750M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5785F,
	  "BCM5785F NetLink 10/100 Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5785G,
	  "BCM5785G NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787F,
	  "Broadcom BCM5787F 10/100 Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906M,
	  "Broadcom BCM5906M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57760,
	  "Broadcom BCM57760 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57761,
	  "Broadcom BCM57761 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57762,
	  "Broadcom BCM57762 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57765,
	  "Broadcom BCM57765 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57766,
	  "Broadcom BCM57766 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57780,
	  "Broadcom BCM57780 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57781,
	  "Broadcom BCM57781 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57782,
	  "Broadcom BCM57782 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57785,
	  "Broadcom BCM57785 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57786,
	  "Broadcom BCM57786 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57788,
	  "Broadcom BCM57788 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57790,
	  "Broadcom BCM57790 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57791,
	  "Broadcom BCM57791 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM57795,
	  "Broadcom BCM57795 Fast Ethernet",
	},
	{ PCI_VENDOR_SCHNEIDERKOCH,
	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
	  "SysKonnect SK-9Dx1 Gigabit Ethernet",
	},
	{ PCI_VENDOR_3COM,
	  PCI_PRODUCT_3COM_3C996,
	  "3Com 3c996 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE4,
	  "Fujitsu PW008GE4 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PW008GE5,
	  "Fujitsu PW008GE5 Gigabit Ethernet",
	},
	{ PCI_VENDOR_FUJITSU4,
	  PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
	  "Fujitsu Primepower 250/450 Gigabit Ethernet",
	},
	{ 0,
	  0,
	  NULL },
};

#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_575X_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_5717_PLUS(sc)		((sc)->bge_flags & BGE_5717_PLUS)
#define BGE_IS_57765_PLUS(sc)		((sc)->bge_flags & BGE_57765_PLUS)

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
"BCM5700 B0" }, 692 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 693 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 694 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 695 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 696 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 697 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 698 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 699 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 700 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 701 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, 702 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, 703 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, 704 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, 705 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, 706 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 707 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 708 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 709 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 710 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 711 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 712 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 713 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 714 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 715 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 716 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 717 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 718 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 719 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 720 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 721 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 722 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 723 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 724 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 725 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 726 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 727 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 728 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 729 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 730 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 731 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 732 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, 733 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, 734 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, 735 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, 736 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 737 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 738 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 739 { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, 740 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 741 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 742 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 743 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 744 /* 5754 and 5787 share the same ASIC ID */ 745 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 746 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 747 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 748 { BGE_CHIPID_BCM5906_A0, "BCM5906 A0" }, 749 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 750 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 751 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, 752 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, 753 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 754 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 755 756 { 0, NULL } 757 }; 758 759 /* 760 * Some defaults for major revisions, so that newer steppings 761 * that we don't know about have a shot at working. 
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM5719, "unknown BCM5719" },
	{ BGE_ASICREV_BCM5720, "unknown BCM5720" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL3_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, bge_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
		return 0;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
}
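
/*
 * Usage sketch (illustrative, not part of the original file): NIC-local
 * memory such as the firmware mailbox is reached through the config-space
 * window set up above, e.g.
 *
 *	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
 *	magic = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
 *
 * Both helpers restore the window base to 0 afterwards, so no stale
 * window state is left behind between accesses.
 */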

/*
 * PCI Express only
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR);
	val &= ~PCI_PCIE_DCSR_MAX_READ_REQ;
	switch (sc->bge_expmrq) {
	case 2048:
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_2048;
		break;
	case 4096:
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		break;
	default:
		panic("incorrect expmrq value(%d)", sc->bge_expmrq);
		break;
	}
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR, val);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx_flush(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4_FLUSH(sc, off, val);
}

/*
 * Clear all stale locks and select the lock for this driver instance.
 */
void
bge_ape_lock_init(struct bge_softc *sc)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, regbase;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		regbase = BGE_APE_LOCK_GRANT;
	else
		regbase = BGE_APE_PER_LOCK_GRANT;

	/* Clear any stale locks. */
	for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
		switch (i) {
		case BGE_APE_LOCK_PHY0:
		case BGE_APE_LOCK_PHY1:
		case BGE_APE_LOCK_PHY2:
		case BGE_APE_LOCK_PHY3:
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
			break;
		default:
			if (pa->pa_function == 0)
				bit = BGE_APE_LOCK_GRANT_DRIVER0;
			else
				bit = (1 << pa->pa_function);
		}
		APE_WRITE_4(sc, regbase + 4 * i, bit);
	}

	/* Select the PHY lock based on the device's function number. */
	switch (pa->pa_function) {
	case 0:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
		break;
	case 1:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
		break;
	case 2:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
		break;
	case 3:
		sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
		break;
	default:
		printf("%s: PHY lock not supported on this function\n",
		    device_xname(sc->bge_dev));
		break;
	}
}

/*
 * Check for APE firmware, set flags, and print version info.
 */
void
bge_ape_read_fw_ver(struct bge_softc *sc)
{
	const char *fwtype;
	uint32_t apedata, features;

	/* Check for a valid APE signature in shared memory. */
	apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
	if (apedata != BGE_APE_SEG_SIG_MAGIC) {
		sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
		return;
	}

	/* Check if APE firmware is running. */
	apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
	if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
		printf("%s: APE signature found but FW status not ready! "
		    "0x%08x\n", device_xname(sc->bge_dev), apedata);
		return;
	}

	sc->bge_mfw_flags |= BGE_MFW_ON_APE;

	/* Fetch the APE firmware type and version. */
	apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
	features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
	if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
		fwtype = "NCSI";
	} else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
		sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
		fwtype = "DASH";
	} else
		fwtype = "UNKN";

	/* Print the APE firmware version. */
	printf(", APE firmware %s %d.%d.%d.%d", fwtype,
	    (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
	    (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
	    (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
	    (apedata & BGE_APE_FW_VERSION_BLDMSK));
}

int
bge_ape_lock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt, req, status;
	int i, off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return (0);

	/* Lock request/grant registers have different bases. */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) {
		req = BGE_APE_LOCK_REQ;
		gnt = BGE_APE_LOCK_GRANT;
	} else {
		req = BGE_APE_PER_LOCK_REQ;
		gnt = BGE_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		/* Lock required when using GPIO. */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return (0);
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		/* Lock required to reset the device. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		/* Lock required when accessing certain APE memory. */
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_REQ_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		/* Lock required when accessing PHYs. */
		bit = BGE_APE_LOCK_REQ_DRIVER0;
		break;
	default:
		return (EINVAL);
	}

	/* Request a lock. */
	APE_WRITE_4_FLUSH(sc, req + off, bit);

	/* Wait up to 1 second to acquire lock. */
	for (i = 0; i < 20000; i++) {
		status = APE_READ_4(sc, gnt + off);
		if (status == bit)
			break;
		DELAY(50);
	}

	/* Handle any errors. */
	if (status != bit) {
		printf("%s: APE lock %d request failed! "
		    "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
		    device_xname(sc->bge_dev),
		    locknum, req + off, bit & 0xFFFF, gnt + off,
		    status & 0xFFFF);
		/* Revoke the lock request. */
		APE_WRITE_4(sc, gnt + off, bit);
		return (EBUSY);
	}

	return (0);
}

void
bge_ape_unlock(struct bge_softc *sc, int locknum)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	uint32_t bit, gnt;
	int off;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
		gnt = BGE_APE_LOCK_GRANT;
	else
		gnt = BGE_APE_PER_LOCK_GRANT;

	off = 4 * locknum;

	switch (locknum) {
	case BGE_APE_LOCK_GPIO:
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
			return;
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_GRC:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_MEM:
		if (pa->pa_function == 0)
			bit = BGE_APE_LOCK_GRANT_DRIVER0;
		else
			bit = (1 << pa->pa_function);
		break;
	case BGE_APE_LOCK_PHY0:
	case BGE_APE_LOCK_PHY1:
	case BGE_APE_LOCK_PHY2:
	case BGE_APE_LOCK_PHY3:
		bit = BGE_APE_LOCK_GRANT_DRIVER0;
		break;
	default:
		return;
	}

	/* Write and flush for consecutive bge_ape_lock() */
	APE_WRITE_4_FLUSH(sc, gnt + off, bit);
}
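
/*
 * Typical lock/unlock pattern (illustrative): the MII accessors below
 * bracket each PHY register access with the per-function PHY lock
 * selected in bge_ape_lock_init():
 *
 *	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
 *		return 0;
 *	... access BGE_MI_COMM ...
 *	bge_ape_unlock(sc, sc->bge_phy_ape_lock);
 */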

/*
 * Send an event to the APE firmware.
 */
void
bge_ape_send_event(struct bge_softc *sc, uint32_t event)
{
	uint32_t apedata;
	int i;

	/* NCSI does not support APE events. */
	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	/* Wait up to 1ms for APE to service previous event. */
	for (i = 10; i > 0; i--) {
		if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
			break;
		apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
		if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
			APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
			    BGE_APE_EVENT_STATUS_EVENT_PENDING);
			bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
			APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
			break;
		}
		bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
		DELAY(100);
	}
	if (i == 0) {
		printf("%s: APE event 0x%08x send timed out\n",
		    device_xname(sc->bge_dev), event);
	}
}

void
bge_ape_driver_state_change(struct bge_softc *sc, int kind)
{
	uint32_t apedata, event;

	if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
		return;

	switch (kind) {
	case BGE_RESET_START:
		/* If this is the first load, clear the load counter. */
		apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
		if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
		else {
			apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
			APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
		}
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
		    BGE_APE_HOST_SEG_SIG_MAGIC);
		APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
		    BGE_APE_HOST_SEG_LEN_MAGIC);

		/* Add some version info if bge(4) supports it. */
		APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
		    BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
		APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
		    BGE_APE_HOST_BEHAV_NO_PHYLOCK);
		APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
		    BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_START);
		event = BGE_APE_EVENT_STATUS_STATE_START;
		break;
	case BGE_RESET_SHUTDOWN:
		APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
		    BGE_APE_HOST_DRVR_STATE_UNLOAD);
		event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case BGE_RESET_SUSPEND:
		event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
	    BGE_APE_EVENT_STATUS_STATE_CHNGE);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4_FLUSH(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		error = bge_nvram_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int error = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
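
/*
 * Worked example (illustrative): the auto-access interface returns the
 * whole 32-bit word containing the requested byte, so for addr = 6 the
 * expression (byte >> ((6 % 4) * 8)) & 0xFF selects bits 23:16, i.e.
 * the third byte of the word.  The NVRAM path runs the word through
 * bswap32() first, apparently because that block returns the word in
 * the opposite byte order.
 */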

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY)) {
			DELAY(5);
			val = CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return;

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(80);
	}

	CSR_WRITE_4_FLUSH(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT_FLUSH(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(80);
	}

	bge_ape_unlock(sc, sc->bge_phy_ape_lock);

	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;
	uint32_t mac_mode, rx_mode, tx_mode;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Set the port mode (MII/GMII) to match the link speed. */
	mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
	    ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
	tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
	rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		mac_mode |= BGE_PORTMODE_GMII;
	else
		mac_mode |= BGE_PORTMODE_MII;

	tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
	rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
			tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
		if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
			rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
	} else
		mac_mode |= BGE_MACMODE_HALF_DUPLEX;

	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, mac_mode);
	DELAY(40);
	CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
	CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds.  jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);
}

/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}
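
/*
 * Example (illustrative): bge_update_all_threshes(2) visits every
 * interface whose name begins with "bge" and, when bge_auto_thresh is
 * set, schedules { rx_ticks = 50, rx_max_bds = 4 } from the table
 * above; out-of-range levels are clamped to [0, NBGE_RX_THRESH - 1]
 * first.
 */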

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}
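
/*
 * Pool geometry (illustrative summary): BGE_JMEM is carved into
 * BGE_JSLOTS buffers of BGE_JLEN bytes each, so bge_jslots[i] points at
 * bge_jumbo_buf + i * BGE_JLEN.  bge_jfree() below inverts this with
 * (buf - bge_jumbo_buf) / BGE_JLEN to recover the slot index.
 */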

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it.  At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot.  For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}
bge_rx_bd)); 1908 } 1909 1910 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1911 } 1912 1913 static void 1914 bge_free_tx_ring(struct bge_softc *sc) 1915 { 1916 int i; 1917 struct txdmamap_pool_entry *dma; 1918 1919 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1920 return; 1921 1922 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1923 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1924 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1925 sc->bge_cdata.bge_tx_chain[i] = NULL; 1926 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1927 link); 1928 sc->txdma[i] = 0; 1929 } 1930 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1931 sizeof(struct bge_tx_bd)); 1932 } 1933 1934 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1935 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1936 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1937 free(dma, M_DEVBUF); 1938 } 1939 1940 sc->bge_flags &= ~BGE_TXRING_VALID; 1941 } 1942 1943 static int 1944 bge_init_tx_ring(struct bge_softc *sc) 1945 { 1946 int i; 1947 bus_dmamap_t dmamap; 1948 struct txdmamap_pool_entry *dma; 1949 1950 if (sc->bge_flags & BGE_TXRING_VALID) 1951 return 0; 1952 1953 sc->bge_txcnt = 0; 1954 sc->bge_tx_saved_considx = 0; 1955 1956 /* Initialize transmit producer index for host-memory send ring. */ 1957 sc->bge_tx_prodidx = 0; 1958 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1959 /* 5700 b2 errata */ 1960 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1961 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1962 1963 /* NIC-memory send ring not used; initialize to zero. */ 1964 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1965 /* 5700 b2 errata */ 1966 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1967 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1968 1969 SLIST_INIT(&sc->txdma_list); 1970 for (i = 0; i < BGE_RSLOTS; i++) { 1971 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1972 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1973 &dmamap)) 1974 return ENOBUFS; 1975 if (dmamap == NULL) 1976 panic("dmamap NULL in bge_init_tx_ring"); 1977 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1978 if (dma == NULL) { 1979 aprint_error_dev(sc->bge_dev, 1980 "can't alloc txdmamap_pool_entry\n"); 1981 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1982 return ENOMEM; 1983 } 1984 dma->dmamap = dmamap; 1985 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1986 } 1987 1988 sc->bge_flags |= BGE_TXRING_VALID; 1989 1990 return 0; 1991 } 1992 1993 static void 1994 bge_setmulti(struct bge_softc *sc) 1995 { 1996 struct ethercom *ac = &sc->ethercom; 1997 struct ifnet *ifp = &ac->ec_if; 1998 struct ether_multi *enm; 1999 struct ether_multistep step; 2000 uint32_t hashes[4] = { 0, 0, 0, 0 }; 2001 uint32_t h; 2002 int i; 2003 2004 if (ifp->if_flags & IFF_PROMISC) 2005 goto allmulti; 2006 2007 /* Now program new ones. */ 2008 ETHER_FIRST_MULTI(step, ac, enm); 2009 while (enm != NULL) { 2010 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2011 /* 2012 * We must listen to a range of multicast addresses. 2013 * For now, just accept all multicasts, rather than 2014 * trying to set only those filter bits needed to match 2015 * the range. (At this time, the only use of address 2016 * ranges is for IP multicast routing, for which the 2017 * range is big enough to require all bits set.) 2018 */ 2019 goto allmulti; 2020 } 2021 2022 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 2023 2024 /* Just want the 7 least-significant bits. 
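 * The filter is a 128-bit hash table spread across four 32-bit
 * registers: bits 6-5 of the CRC select the register and bits 4-0
 * select the bit within it. Illustratively, h = 0x6b sets bit 11
 * (0x6b & 0x1f) of hash register 3 ((0x6b & 0x60) >> 5).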
*/ 2025 h &= 0x7f; 2026 2027 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 2028 ETHER_NEXT_MULTI(step, enm); 2029 } 2030 2031 ifp->if_flags &= ~IFF_ALLMULTI; 2032 goto setit; 2033 2034 allmulti: 2035 ifp->if_flags |= IFF_ALLMULTI; 2036 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 2037 2038 setit: 2039 for (i = 0; i < 4; i++) 2040 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 2041 } 2042 2043 static void 2044 bge_sig_pre_reset(struct bge_softc *sc, int type) 2045 { 2046 2047 /* 2048 * Some chips don't like this so only do this if ASF is enabled 2049 */ 2050 if (sc->bge_asf_mode) 2051 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); 2052 2053 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 2054 switch (type) { 2055 case BGE_RESET_START: 2056 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2057 BGE_FW_DRV_STATE_START); 2058 break; 2059 case BGE_RESET_SHUTDOWN: 2060 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2061 BGE_FW_DRV_STATE_UNLOAD); 2062 break; 2063 case BGE_RESET_SUSPEND: 2064 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2065 BGE_FW_DRV_STATE_SUSPEND); 2066 break; 2067 } 2068 } 2069 2070 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) 2071 bge_ape_driver_state_change(sc, type); 2072 } 2073 2074 static void 2075 bge_sig_post_reset(struct bge_softc *sc, int type) 2076 { 2077 2078 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 2079 switch (type) { 2080 case BGE_RESET_START: 2081 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2082 BGE_FW_DRV_STATE_START_DONE); 2083 /* START DONE */ 2084 break; 2085 case BGE_RESET_SHUTDOWN: 2086 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2087 BGE_FW_DRV_STATE_UNLOAD_DONE); 2088 break; 2089 } 2090 } 2091 2092 if (type == BGE_RESET_SHUTDOWN) 2093 bge_ape_driver_state_change(sc, type); 2094 } 2095 2096 static void 2097 bge_sig_legacy(struct bge_softc *sc, int type) 2098 { 2099 2100 if (sc->bge_asf_mode) { 2101 switch (type) { 2102 case BGE_RESET_START: 2103 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2104 BGE_FW_DRV_STATE_START); 2105 break; 2106 case BGE_RESET_SHUTDOWN: 2107 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, 2108 BGE_FW_DRV_STATE_UNLOAD); 2109 break; 2110 } 2111 } 2112 } 2113 2114 static void 2115 bge_wait_for_event_ack(struct bge_softc *sc) 2116 { 2117 int i; 2118 2119 /* wait up to 2500usec */ 2120 for (i = 0; i < 250; i++) { 2121 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & 2122 BGE_RX_CPU_DRV_EVENT)) 2123 break; 2124 DELAY(10); 2125 } 2126 } 2127 2128 static void 2129 bge_stop_fw(struct bge_softc *sc) 2130 { 2131 2132 if (sc->bge_asf_mode) { 2133 bge_wait_for_event_ack(sc); 2134 2135 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); 2136 CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT, 2137 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); 2138 2139 bge_wait_for_event_ack(sc); 2140 } 2141 } 2142 2143 static int 2144 bge_poll_fw(struct bge_softc *sc) 2145 { 2146 uint32_t val; 2147 int i; 2148 2149 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2150 for (i = 0; i < BGE_TIMEOUT; i++) { 2151 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 2152 if (val & BGE_VCPU_STATUS_INIT_DONE) 2153 break; 2154 DELAY(100); 2155 } 2156 if (i >= BGE_TIMEOUT) { 2157 aprint_error_dev(sc->bge_dev, "reset timed out\n"); 2158 return -1; 2159 } 2160 } else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) { 2161 /* 2162 * Poll the value location we just wrote until 2163 * we see the 1's complement of the magic number. 2164 * This indicates that the firmware initialization 2165 * is complete. 
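 * (The magic value was written to BGE_SRAM_FW_MB before the reset;
 * on a timeout the last value read is reported below.)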
2166 * XXX 1000ms for Flash and 10000ms for SEEPROM.
2167 */
2168 for (i = 0; i < BGE_TIMEOUT; i++) {
2169 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
2170 if (val == ~BGE_SRAM_FW_MB_MAGIC)
2171 break;
2172 DELAY(10);
2173 }
2174
2175 if (i >= BGE_TIMEOUT) {
2176 aprint_error_dev(sc->bge_dev,
2177 "firmware handshake timed out, val = %x\n", val);
2178 return -1;
2179 }
2180 }
2181
2182 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
2183 /* tg3 says we have to wait extra time */
2184 delay(10 * 1000);
2185 }
2186
2187 return 0;
2188 }
2189
2190 int
2191 bge_phy_addr(struct bge_softc *sc)
2192 {
2193 struct pci_attach_args *pa = &(sc->bge_pa);
2194 int phy_addr = 1;
2195
2196 /*
2197 * PHY address mapping for various devices.
2198 *
2199 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
2200 * ---------+-------+-------+-------+-------+
2201 * BCM57XX | 1 | X | X | X |
2202 * BCM5704 | 1 | X | 1 | X |
2203 * BCM5717 | 1 | 8 | 2 | 9 |
2204 * BCM5719 | 1 | 8 | 2 | 9 |
2205 * BCM5720 | 1 | 8 | 2 | 9 |
2206 *
2207 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
2208 * ---------+-------+-------+-------+-------+
2209 * BCM57XX | X | X | X | X |
2210 * BCM5704 | X | X | X | X |
2211 * BCM5717 | X | X | X | X |
2212 * BCM5719 | 3 | 10 | 4 | 11 |
2213 * BCM5720 | X | X | X | X |
2214 *
2215 * Other addresses may respond but they are not
2216 * IEEE compliant PHYs and should be ignored.
2217 */
2218 switch (BGE_ASICREV(sc->bge_chipid)) {
2219 case BGE_ASICREV_BCM5717:
2220 case BGE_ASICREV_BCM5719:
2221 case BGE_ASICREV_BCM5720:
2222 phy_addr = pa->pa_function;
2223 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
2224 phy_addr += (CSR_READ_4(sc, BGE_SGDIG_STS) &
2225 BGE_SGDIGSTS_IS_SERDES) ? 8 : 1;
2226 } else {
2227 phy_addr += (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
2228 BGE_CPMU_PHY_STRAP_IS_SERDES) ? 8 : 1;
2229 }
2230 }
2231
2232 return phy_addr;
2233 }
2234
2235 /*
2236 * Do endian, PCI and DMA initialization. Also check the on-board ROM
2237 * self-test results.
2238 */
2239 static int
2240 bge_chipinit(struct bge_softc *sc)
2241 {
2242 uint32_t dma_rw_ctl, mode_ctl, reg;
2243 int i;
2244
2245 /* Set endianness before we access any non-PCI registers. */
2246 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
2247 BGE_INIT);
2248
2249 /*
2250 * Clear the MAC statistics block in the NIC's
2251 * internal memory.
2252 */
2253 for (i = BGE_STATS_BLOCK;
2254 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
2255 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
2256
2257 for (i = BGE_STATUS_BLOCK;
2258 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
2259 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
2260
2261 /* 5717 workaround from tg3 */
2262 if (sc->bge_chipid == BGE_CHIPID_BCM5717_A0) {
2263 /* Save */
2264 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2265
2266 /* Temporarily modify MODE_CTL to control TLP */
2267 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2268 CSR_WRITE_4(sc, BGE_MODE_CTL, reg | BGE_MODECTL_PCIE_TLPADDR1);
2269
2270 /* Control TLP */
2271 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2272 BGE_TLP_PHYCTL1);
2273 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL1,
2274 reg | BGE_TLP_PHYCTL1_EN_L1PLLPD);
2275
2276 /* Restore */
2277 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2278 }
2279
2280 /* XXX Should we use 57765_FAMILY?
*/
2281 if (BGE_IS_57765_PLUS(sc)) {
2282 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) {
2283 /* Save */
2284 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2285
2286 /* Temporarily modify MODE_CTL to control TLP */
2287 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2288 CSR_WRITE_4(sc, BGE_MODE_CTL,
2289 reg | BGE_MODECTL_PCIE_TLPADDR1);
2290
2291 /* Control TLP */
2292 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2293 BGE_TLP_PHYCTL5);
2294 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_PHYCTL5,
2295 reg | BGE_TLP_PHYCTL5_DIS_L2CLKREQ);
2296
2297 /* Restore */
2298 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2299 }
2300 if (BGE_CHIPREV(sc->bge_chipid) != BGE_CHIPREV_57765_AX) {
2301 reg = CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL);
2302 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
2303 reg | BGE_CPMU_PADRNG_CTL_RDIV2);
2304
2305 /* Save */
2306 mode_ctl = CSR_READ_4(sc, BGE_MODE_CTL);
2307
2308 /* Temporarily modify MODE_CTL to control TLP */
2309 reg = mode_ctl & ~BGE_MODECTL_PCIE_TLPADDRMASK;
2310 CSR_WRITE_4(sc, BGE_MODE_CTL,
2311 reg | BGE_MODECTL_PCIE_TLPADDR0);
2312
2313 /* Control TLP */
2314 reg = CSR_READ_4(sc, BGE_TLP_CONTROL_REG +
2315 BGE_TLP_FTSMAX);
2316 reg &= ~BGE_TLP_FTSMAX_MSK;
2317 CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG + BGE_TLP_FTSMAX,
2318 reg | BGE_TLP_FTSMAX_VAL);
2319
2320 /* Restore */
2321 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2322 }
2323
2324 reg = CSR_READ_4(sc, BGE_CPMU_LSPD_10MB_CLK);
2325 reg &= ~BGE_CPMU_LSPD_10MB_MACCLK_MASK;
2326 reg |= BGE_CPMU_LSPD_10MB_MACCLK_6_25;
2327 CSR_WRITE_4(sc, BGE_CPMU_LSPD_10MB_CLK, reg);
2328 }
2329
2330 /* Set up the PCI DMA control register. */
2331 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
2332 if (sc->bge_flags & BGE_PCIE) {
2333 /* Read watermark not used, 128 bytes for write. */
2334 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
2335 device_xname(sc->bge_dev)));
2336 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2337 } else if (sc->bge_flags & BGE_PCIX) {
2338 DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
2339 device_xname(sc->bge_dev)));
2340 /* PCI-X bus */
2341 if (BGE_IS_5714_FAMILY(sc)) {
2342 /* 256 bytes for read and write. */
2343 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
2344 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
2345
2346 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
2347 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2348 else
2349 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
2350 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2351 /* 1536 bytes for read, 384 bytes for write. */
2352 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2353 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
2354 } else {
2355 /* 384 bytes for read and write. */
2356 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
2357 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
2358 (0x0F);
2359 }
2360
2361 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2362 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2363 uint32_t tmp;
2364
2365 /* Set ONEDMA_ATONCE for hardware workaround. */
2366 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
2367 if (tmp == 6 || tmp == 7)
2368 dma_rw_ctl |=
2369 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
2370
2371 /* Set PCI-X DMA write workaround. */
2372 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
2373 }
2374 } else {
2375 /* Conventional PCI bus: 256 bytes for read and write.
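 * (Note that the same watermark encoding selects different burst
 * sizes depending on the bus mode; compare the PCI-X cases above.)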
*/
2376 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
2377 device_xname(sc->bge_dev)));
2378 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
2379 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
2380
2381 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
2382 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
2383 dma_rw_ctl |= 0x0F;
2384 }
2385
2386 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2387 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
2388 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
2389 BGE_PCIDMARWCTL_ASRT_ALL_BE;
2390
2391 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2392 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2393 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
2394
2395 if (BGE_IS_5717_PLUS(sc)) {
2396 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
2397 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
2398 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
2399
2400 /*
2401 * Enable HW workaround for controllers that misinterpret
2402 * a status tag update and leave interrupts permanently
2403 * disabled.
2404 */
2405 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
2406 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765)
2407 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
2408 }
2409
2410 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
2411 dma_rw_ctl);
2412
2413 /*
2414 * Set up general mode register.
2415 */
2416 mode_ctl = BGE_DMA_SWAP_OPTIONS;
2417 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2418 /* Retain Host-2-BMC settings written by APE firmware. */
2419 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
2420 (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
2421 BGE_MODECTL_WORDSWAP_B2HRX_DATA |
2422 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
2423 }
2424 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
2425 BGE_MODECTL_TX_NO_PHDR_CSUM;
2426
2427 /*
2428 * BCM5701 B5 has a bug causing data corruption when using
2429 * 64-bit DMA reads, which can be terminated early and then
2430 * completed later as 32-bit accesses, in combination with
2431 * certain bridges.
2432 */
2433 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2434 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
2435 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
2436
2437 /*
2438 * Tell the firmware the driver is running
2439 */
2440 if (sc->bge_asf_mode & ASF_STACKUP)
2441 mode_ctl |= BGE_MODECTL_STACKUP;
2442
2443 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
2444
2445 /*
2446 * Disable memory write invalidate. Apparently it is not supported
2447 * properly by these devices.
2448 */
2449 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
2450 PCI_COMMAND_INVALIDATE_ENABLE);
2451
2452 #ifdef __brokenalpha__
2453 /*
2454 * Must ensure that we do not cross an 8K (bytes) boundary
2455 * for DMA reads. Our highest limit is 1K bytes.
This is a 2456 * restriction on some ALPHA platforms with early revision 2457 * 21174 PCI chipsets, such as the AlphaPC 164lx 2458 */ 2459 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 2460 #endif 2461 2462 /* Set the timer prescaler (always 66MHz) */ 2463 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 2464 2465 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2466 DELAY(40); /* XXX */ 2467 2468 /* Put PHY into ready state */ 2469 BGE_CLRBIT_FLUSH(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 2470 DELAY(40); 2471 } 2472 2473 return 0; 2474 } 2475 2476 static int 2477 bge_blockinit(struct bge_softc *sc) 2478 { 2479 volatile struct bge_rcb *rcb; 2480 bus_size_t rcb_addr; 2481 struct ifnet *ifp = &sc->ethercom.ec_if; 2482 bge_hostaddr taddr; 2483 uint32_t dmactl, val; 2484 int i, limit; 2485 2486 /* 2487 * Initialize the memory window pointer register so that 2488 * we can access the first 32K of internal NIC RAM. This will 2489 * allow us to set up the TX send ring RCBs and the RX return 2490 * ring RCBs, plus other things which live in NIC memory. 2491 */ 2492 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 2493 2494 if (!BGE_IS_5705_PLUS(sc)) { 2495 /* 57XX step 33 */ 2496 /* Configure mbuf memory pool */ 2497 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 2498 BGE_BUFFPOOL_1); 2499 2500 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2501 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 2502 else 2503 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 2504 2505 /* 57XX step 34 */ 2506 /* Configure DMA resource pool */ 2507 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 2508 BGE_DMA_DESCRIPTORS); 2509 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 2510 } 2511 2512 /* 5718 step 11, 57XX step 35 */ 2513 /* 2514 * Configure mbuf pool watermarks. New broadcom docs strongly 2515 * recommend these. 2516 */ 2517 if (BGE_IS_5717_PLUS(sc)) { 2518 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2519 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 2520 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 2521 } else if (BGE_IS_5705_PLUS(sc)) { 2522 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2523 2524 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 2525 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 2526 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 2527 } else { 2528 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 2529 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2530 } 2531 } else { 2532 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 2533 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 2534 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2535 } 2536 2537 /* 57XX step 36 */ 2538 /* Configure DMA resource watermarks */ 2539 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 2540 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 2541 2542 /* 5718 step 13, 57XX step 38 */ 2543 /* Enable buffer manager */ 2544 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_ATTN; 2545 /* 2546 * Change the arbitration algorithm of TXMBUF read request to 2547 * round-robin instead of priority based for BCM5719. When 2548 * TXFIFO is almost empty, RDMA will hold its request until 2549 * TXFIFO is not almost empty. 
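 * Setting BGE_BMANMODE_NO_TX_UNDERRUN below selects the
 * round-robin arbitration.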
2550 */
2551 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719)
2552 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
2553 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2554 sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2555 sc->bge_chipid == BGE_CHIPID_BCM5720_A0)
2556 val |= BGE_BMANMODE_LOMBUF_ATTN;
2557 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
2558
2559 /* 57XX step 39 */
2560 /* Poll for buffer manager start indication */
2561 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2562 DELAY(10);
2563 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
2564 break;
2565 }
2566
2567 if (i == BGE_TIMEOUT * 2) {
2568 aprint_error_dev(sc->bge_dev,
2569 "buffer manager failed to start\n");
2570 return ENXIO;
2571 }
2572
2573 /* 57XX step 40 */
2574 /* Enable flow-through queues */
2575 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2576 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2577
2578 /* Wait until queue initialization is complete */
2579 for (i = 0; i < BGE_TIMEOUT * 2; i++) {
2580 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2581 break;
2582 DELAY(10);
2583 }
2584
2585 if (i == BGE_TIMEOUT * 2) {
2586 aprint_error_dev(sc->bge_dev,
2587 "flow-through queue init failed\n");
2588 return ENXIO;
2589 }
2590
2591 /*
2592 * Summary of rings supported by the controller:
2593 *
2594 * Standard Receive Producer Ring
2595 * - This ring is used to feed receive buffers for "standard"
2596 * sized frames (typically 1536 bytes) to the controller.
2597 *
2598 * Jumbo Receive Producer Ring
2599 * - This ring is used to feed receive buffers for jumbo sized
2600 * frames (i.e. anything bigger than the "standard" frames)
2601 * to the controller.
2602 *
2603 * Mini Receive Producer Ring
2604 * - This ring is used to feed receive buffers for "mini"
2605 * sized frames to the controller.
2606 * - This feature required external memory for the controller
2607 * but was never used in a production system. Should always
2608 * be disabled.
2609 *
2610 * Receive Return Ring
2611 * - After the controller has placed an incoming frame into a
2612 * receive buffer, that buffer is moved into a receive return
2613 * ring. The driver is then responsible for passing the
2614 * buffer up to the stack. Many versions of the controller
2615 * support multiple RR rings.
2616 *
2617 * Send Ring
2618 * - This ring is used for outgoing frames. Many versions of
2619 * the controller support multiple send rings.
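 *
 * This driver uses only send ring 0 and receive return ring 0.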
2620 */ 2621 2622 /* 5718 step 15, 57XX step 41 */ 2623 /* Initialize the standard RX ring control block */ 2624 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2625 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2626 /* 5718 step 16 */ 2627 if (BGE_IS_5717_PLUS(sc)) { 2628 /* 2629 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) 2630 * Bits 15-2 : Maximum RX frame size 2631 * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled 2632 * Bit 0 : Reserved 2633 */ 2634 rcb->bge_maxlen_flags = 2635 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2636 } else if (BGE_IS_5705_PLUS(sc)) { 2637 /* 2638 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) 2639 * Bits 15-2 : Reserved (should be 0) 2640 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2641 * Bit 0 : Reserved 2642 */ 2643 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2644 } else { 2645 /* 2646 * Ring size is always XXX entries 2647 * Bits 31-16: Maximum RX frame size 2648 * Bits 15-2 : Reserved (should be 0) 2649 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled 2650 * Bit 0 : Reserved 2651 */ 2652 rcb->bge_maxlen_flags = 2653 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2654 } 2655 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2656 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2657 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2658 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; 2659 else 2660 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2661 /* Write the standard receive producer ring control block. */ 2662 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2663 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2664 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2665 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2666 2667 /* Reset the standard receive producer ring producer index. */ 2668 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2669 2670 /* 57XX step 42 */ 2671 /* 2672 * Initialize the jumbo RX ring control block 2673 * We set the 'ring disabled' bit in the flags 2674 * field until we're actually ready to start 2675 * using this ring (i.e. once we set the MTU 2676 * high enough to require it). 2677 */ 2678 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2679 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2680 BGE_HOSTADDR(rcb->bge_hostaddr, 2681 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2682 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2683 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); 2684 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2685 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 || 2686 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) 2687 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717; 2688 else 2689 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2690 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2691 rcb->bge_hostaddr.bge_addr_hi); 2692 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2693 rcb->bge_hostaddr.bge_addr_lo); 2694 /* Program the jumbo receive producer ring RCB parameters. */ 2695 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2696 rcb->bge_maxlen_flags); 2697 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2698 /* Reset the jumbo receive producer ring producer index. */ 2699 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2700 } 2701 2702 /* 57XX step 43 */ 2703 /* Disable the mini receive producer ring RCB. 
*/
2704 if (BGE_IS_5700_FAMILY(sc)) {
2705 /* Set up dummy disabled mini ring RCB */
2706 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
2707 rcb->bge_maxlen_flags =
2708 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2709 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2710 rcb->bge_maxlen_flags);
2711 /* Reset the mini receive producer ring producer index. */
2712 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2713
2714 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2715 offsetof(struct bge_ring_data, bge_info),
2716 sizeof (struct bge_gib),
2717 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2718 }
2719
2720 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2721 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2722 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2723 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2724 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2725 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2726 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2727 }
2728 /* 5718 step 14, 57XX step 44 */
2729 /*
2730 * The BD ring replenish thresholds control how often the
2731 * hardware fetches new BD's from the producer rings in host
2732 * memory. Setting the value too low on a busy system can
2733 * starve the hardware and reduce the throughput.
2734 *
2735 * Set the BD ring replenish thresholds. The recommended
2736 * values are 1/8th the number of descriptors allocated to
2737 * each ring, but since we try to avoid filling the entire
2738 * ring we set these to the minimal value of 8. This needs to
2739 * be done on several of the supported chip revisions anyway,
2740 * to work around HW bugs.
2741 */
2742 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, 8);
2743 if (BGE_IS_JUMBO_CAPABLE(sc))
2744 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 8);
2745
2746 /* 5718 step 18 */
2747 if (BGE_IS_5717_PLUS(sc)) {
2748 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4);
2749 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4);
2750 }
2751
2752 /* 57XX step 45 */
2753 /*
2754 * Disable all send rings by setting the 'ring disabled' bit
2755 * in the flags field of all the TX send ring control blocks,
2756 * located in NIC memory.
2757 */
2758 if (BGE_IS_5700_FAMILY(sc)) {
2759 /* 5700 to 5704 had 16 send rings.
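 * Later chips expose only one send ring, so only a single RCB
 * needs to be disabled there.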
*/
2760 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2761 } else
2762 limit = 1;
2763 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2764 for (i = 0; i < limit; i++) {
2765 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2766 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2767 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2768 rcb_addr += sizeof(struct bge_rcb);
2769 }
2770
2771 /* 57XX step 46 and 47 */
2772 /* Configure send ring RCB 0 (we use only the first ring) */
2773 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2774 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
2775 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2776 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2777 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2778 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2779 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2780 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, BGE_SEND_RING_5717);
2781 else
2782 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
2783 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2784 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2785 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2786
2787 /* 57XX step 48 */
2788 /*
2789 * Disable all receive return rings by setting the
2790 * 'ring disabled' bit in the flags field of all the receive
2791 * return ring control blocks, located in NIC memory.
2792 */
2793 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2794 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
2795 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
2796 /* Should be 17, use 16 until we get an SRAM map. */
2797 limit = 16;
2798 } else if (BGE_IS_5700_FAMILY(sc))
2799 limit = BGE_RX_RINGS_MAX;
2800 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2801 BGE_IS_57765_PLUS(sc))
2802 limit = 4;
2803 else
2804 limit = 1;
2805 /* Disable all receive return rings */
2806 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2807 for (i = 0; i < limit; i++) {
2808 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
2809 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
2810 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2811 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
2812 BGE_RCB_FLAG_RING_DISABLED));
2813 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
2814 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2815 (i * (sizeof(uint64_t))), 0);
2816 rcb_addr += sizeof(struct bge_rcb);
2817 }
2818
2819 /* 57XX step 49 */
2820 /*
2821 * Set up receive return ring 0. Note that the NIC address
2822 * for RX return rings is 0x0. The return rings live entirely
2823 * within the host, so the nicaddr field in the RCB isn't used.
2824 */
2825 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2826 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
2827 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2828 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2829 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
2830 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
2831 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2832
2833 /* 5718 step 24, 57XX step 53 */
2834 /* Set random backoff seed for TX */
2835 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2836 (CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
2837 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
2838 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5]) &
2839 BGE_TX_BACKOFF_SEED_MASK);
2840
2841 /* 5718 step 26, 57XX step 55 */
2842 /* Set inter-packet gap */
2843 val = 0x2620;
2844 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
2845 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2846 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2847 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2848
2849 /* 5718 step 27, 57XX step 56 */
2850 /*
2851 * Specify which ring to use for packets that don't match
2852 * any RX rules.
2853 */
2854 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2855
2856 /* 5718 step 28, 57XX step 57 */
2857 /*
2858 * Configure number of RX lists. One interrupt distribution
2859 * list, sixteen active lists, one bad frames class.
2860 */
2861 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2862
2863 /* 5718 step 29, 57XX step 58 */
2864 /* Initialize RX list placement stats mask. */
2865 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2866 /* 5718 step 30, 57XX step 59 */
2867 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2868
2869 /* 5718 step 33, 57XX step 62 */
2870 /* Disable host coalescing until we get it set up */
2871 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2872
2873 /* 5718 step 34, 57XX step 63 */
2874 /* Poll to make sure it's shut down.
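 * (The enable bit reads back as set until the coalescing engine
 * has actually idled.)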
*/ 2875 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2876 DELAY(10); 2877 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2878 break; 2879 } 2880 2881 if (i == BGE_TIMEOUT * 2) { 2882 aprint_error_dev(sc->bge_dev, 2883 "host coalescing engine failed to idle\n"); 2884 return ENXIO; 2885 } 2886 2887 /* 5718 step 35, 36, 37 */ 2888 /* Set up host coalescing defaults */ 2889 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2890 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2891 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2892 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2893 if (!(BGE_IS_5705_PLUS(sc))) { 2894 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2895 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2896 } 2897 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2898 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2899 2900 /* Set up address of statistics block */ 2901 if (BGE_IS_5700_FAMILY(sc)) { 2902 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2903 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2904 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2905 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2906 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2907 } 2908 2909 /* 5718 step 38 */ 2910 /* Set up address of status block */ 2911 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2912 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2913 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2914 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2915 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2916 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2917 2918 /* Set up status block size. */ 2919 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 && 2920 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 2921 val = BGE_STATBLKSZ_FULL; 2922 bzero(&sc->bge_rdata->bge_status_block, BGE_STATUS_BLK_SZ); 2923 } else { 2924 val = BGE_STATBLKSZ_32BYTE; 2925 bzero(&sc->bge_rdata->bge_status_block, 32); 2926 } 2927 2928 /* 5718 step 39, 57XX step 73 */ 2929 /* Turn on host coalescing state machine */ 2930 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 2931 2932 /* 5718 step 40, 57XX step 74 */ 2933 /* Turn on RX BD completion state machine and enable attentions */ 2934 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2935 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2936 2937 /* 5718 step 41, 57XX step 75 */ 2938 /* Turn on RX list placement state machine */ 2939 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2940 2941 /* 57XX step 76 */ 2942 /* Turn on RX list selector state machine. */ 2943 if (!(BGE_IS_5705_PLUS(sc))) 2944 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2945 2946 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2947 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2948 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2949 BGE_MACMODE_FRMHDR_DMA_ENB; 2950 2951 if (sc->bge_flags & BGE_PHY_FIBER_TBI) 2952 val |= BGE_PORTMODE_TBI; 2953 else if (sc->bge_flags & BGE_PHY_FIBER_MII) 2954 val |= BGE_PORTMODE_GMII; 2955 else 2956 val |= BGE_PORTMODE_MII; 2957 2958 /* 5718 step 42 and 43, 57XX step 77 and 78 */ 2959 /* Allow APE to send/receive frames. 
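 * (This is done only when management firmware is running on the
 * APE.)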
*/
2960 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2961 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2962
2963 /* Turn on DMA, clear stats */
2964 CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
2965 /* 5718 step 44 */
2966 DELAY(40);
2967
2968 /* 5718 step 45, 57XX step 79 */
2969 /* Set misc. local control, enable interrupts on attentions */
2970 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2971 if (BGE_IS_5717_PLUS(sc)) {
2972 CSR_READ_4(sc, BGE_MISC_LOCAL_CTL); /* Flush */
2973 /* 5718 step 46 */
2974 DELAY(100);
2975 }
2976
2977 /* 57XX step 81 */
2978 /* Turn on DMA completion state machine */
2979 if (!(BGE_IS_5705_PLUS(sc)))
2980 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2981
2982 /* 5718 step 47, 57XX step 82 */
2983 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2984
2985 /* 5718 step 48 */
2986 /* Enable host coalescing bug fix. */
2987 if (BGE_IS_5755_PLUS(sc))
2988 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2989
2990 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)
2991 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2992
2993 /* Turn on write DMA state machine */
2994 CSR_WRITE_4_FLUSH(sc, BGE_WDMA_MODE, val);
2995 /* 5718 step 49 */
2996 DELAY(40);
2997
2998 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2999
3000 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717)
3001 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
3002
3003 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3004 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
3005 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
3006 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
3007 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
3008 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
3009
3010 if (sc->bge_flags & BGE_PCIE)
3011 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
3012 if (sc->bge_flags & BGE_TSO)
3013 val |= BGE_RDMAMODE_TSO4_ENABLE;
3014
3015 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
3016 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
3017 BGE_RDMAMODE_H2BNC_VLAN_DET;
3018 /*
3019 * Allow multiple outstanding read requests from
3020 * non-LSO read DMA engine.
3021 */
3022 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
3023 }
3024
3025 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3026 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3027 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
3028 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780 ||
3029 BGE_IS_5717_PLUS(sc)) { /* XXX 57765? */
3030 dmactl = CSR_READ_4(sc, BGE_RDMA_RSRVCTRL);
3031 /*
3032 * Adjust tx margin to prevent TX data corruption and
3033 * fix internal FIFO overflow.
3034 */
3035 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3036 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
3037 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
3038 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
3039 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
3040 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
3041 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
3042 }
3043 /*
3044 * Enable fix for read DMA FIFO overruns.
3045 * The fix is to limit the number of RX BDs
3046 * the hardware would fetch at a time.
3047 */
3048 CSR_WRITE_4(sc, BGE_RDMA_RSRVCTRL, dmactl |
3049 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
3050 }
3051
3052 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719) {
3053 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
3054 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
3055 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
3056 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
3057 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) {
3058 /*
3059 * Allow 4KB burst length reads for non-LSO frames.
3060 * Enable 512B burst length reads for buffer descriptors. 3061 */ 3062 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, 3063 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | 3064 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | 3065 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); 3066 } 3067 3068 /* Turn on read DMA state machine */ 3069 CSR_WRITE_4_FLUSH(sc, BGE_RDMA_MODE, val); 3070 /* 5718 step 52 */ 3071 delay(40); 3072 3073 /* 5718 step 56, 57XX step 84 */ 3074 /* Turn on RX data completion state machine */ 3075 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 3076 3077 /* Turn on RX data and RX BD initiator state machine */ 3078 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 3079 3080 /* 57XX step 85 */ 3081 /* Turn on Mbuf cluster free state machine */ 3082 if (!BGE_IS_5705_PLUS(sc)) 3083 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 3084 3085 /* 5718 step 57, 57XX step 86 */ 3086 /* Turn on send data completion state machine */ 3087 val = BGE_SDCMODE_ENABLE; 3088 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 3089 val |= BGE_SDCMODE_CDELAY; 3090 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 3091 3092 /* 5718 step 58 */ 3093 /* Turn on send BD completion state machine */ 3094 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 3095 3096 /* 57XX step 88 */ 3097 /* Turn on RX BD initiator state machine */ 3098 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 3099 3100 /* 5718 step 60, 57XX step 90 */ 3101 /* Turn on send data initiator state machine */ 3102 if (sc->bge_flags & BGE_TSO) { 3103 /* XXX: magic value from Linux driver */ 3104 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 3105 BGE_SDIMODE_HW_LSO_PRE_DMA); 3106 } else 3107 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 3108 3109 /* 5718 step 61, 57XX step 91 */ 3110 /* Turn on send BD initiator state machine */ 3111 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 3112 3113 /* 5718 step 62, 57XX step 92 */ 3114 /* Turn on send BD selector state machine */ 3115 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 3116 3117 /* 5718 step 31, 57XX step 60 */ 3118 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 3119 /* 5718 step 32, 57XX step 61 */ 3120 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 3121 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 3122 3123 /* ack/clear link change events */ 3124 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3125 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3126 BGE_MACSTAT_LINK_CHANGED); 3127 CSR_WRITE_4(sc, BGE_MI_STS, 0); 3128 3129 /* 3130 * Enable attention when the link has changed state for 3131 * devices that use auto polling. 3132 */ 3133 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 3134 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 3135 } else { 3136 /* 5718 step 68 */ 3137 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 3138 /* 5718 step 69 (optionally) */ 3139 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 3140 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 3141 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 3142 BGE_EVTENB_MI_INTERRUPT); 3143 } 3144 3145 /* 3146 * Clear any pending link state attention. 3147 * Otherwise some link state change events may be lost until attention 3148 * is cleared by bge_intr() -> bge_link_upd() sequence. 3149 * It's not necessary on newer BCM chips - perhaps enabling link 3150 * state change attentions implies clearing pending attention. 
3151 */ 3152 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 3153 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 3154 BGE_MACSTAT_LINK_CHANGED); 3155 3156 /* Enable link state change attentions. */ 3157 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 3158 3159 return 0; 3160 } 3161 3162 static const struct bge_revision * 3163 bge_lookup_rev(uint32_t chipid) 3164 { 3165 const struct bge_revision *br; 3166 3167 for (br = bge_revisions; br->br_name != NULL; br++) { 3168 if (br->br_chipid == chipid) 3169 return br; 3170 } 3171 3172 for (br = bge_majorrevs; br->br_name != NULL; br++) { 3173 if (br->br_chipid == BGE_ASICREV(chipid)) 3174 return br; 3175 } 3176 3177 return NULL; 3178 } 3179 3180 static const struct bge_product * 3181 bge_lookup(const struct pci_attach_args *pa) 3182 { 3183 const struct bge_product *bp; 3184 3185 for (bp = bge_products; bp->bp_name != NULL; bp++) { 3186 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 3187 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 3188 return bp; 3189 } 3190 3191 return NULL; 3192 } 3193 3194 static uint32_t 3195 bge_chipid(const struct pci_attach_args *pa) 3196 { 3197 uint32_t id; 3198 3199 id = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) 3200 >> BGE_PCIMISCCTL_ASICREV_SHIFT; 3201 3202 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { 3203 switch (PCI_PRODUCT(pa->pa_id)) { 3204 case PCI_PRODUCT_BROADCOM_BCM5717: 3205 case PCI_PRODUCT_BROADCOM_BCM5718: 3206 case PCI_PRODUCT_BROADCOM_BCM5719: 3207 case PCI_PRODUCT_BROADCOM_BCM5720: 3208 case PCI_PRODUCT_BROADCOM_BCM5724: /* ??? */ 3209 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3210 BGE_PCI_GEN2_PRODID_ASICREV); 3211 break; 3212 case PCI_PRODUCT_BROADCOM_BCM57761: 3213 case PCI_PRODUCT_BROADCOM_BCM57762: 3214 case PCI_PRODUCT_BROADCOM_BCM57765: 3215 case PCI_PRODUCT_BROADCOM_BCM57766: 3216 case PCI_PRODUCT_BROADCOM_BCM57781: 3217 case PCI_PRODUCT_BROADCOM_BCM57785: 3218 case PCI_PRODUCT_BROADCOM_BCM57791: 3219 case PCI_PRODUCT_BROADCOM_BCM57795: 3220 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3221 BGE_PCI_GEN15_PRODID_ASICREV); 3222 break; 3223 default: 3224 id = pci_conf_read(pa->pa_pc, pa->pa_tag, 3225 BGE_PCI_PRODID_ASICREV); 3226 break; 3227 } 3228 } 3229 3230 return id; 3231 } 3232 3233 /* 3234 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 3235 * against our list and return its name if we find a match. Note 3236 * that since the Broadcom controller contains VPD support, we 3237 * can get the device name string from the controller itself instead 3238 * of the compiled-in string. This is a little slow, but it guarantees 3239 * we'll always announce the right product name. 
3240 */ 3241 static int 3242 bge_probe(device_t parent, cfdata_t match, void *aux) 3243 { 3244 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 3245 3246 if (bge_lookup(pa) != NULL) 3247 return 1; 3248 3249 return 0; 3250 } 3251 3252 static void 3253 bge_attach(device_t parent, device_t self, void *aux) 3254 { 3255 struct bge_softc *sc = device_private(self); 3256 struct pci_attach_args *pa = aux; 3257 prop_dictionary_t dict; 3258 const struct bge_product *bp; 3259 const struct bge_revision *br; 3260 pci_chipset_tag_t pc; 3261 pci_intr_handle_t ih; 3262 const char *intrstr = NULL; 3263 uint32_t hwcfg = 0; 3264 uint32_t command; 3265 struct ifnet *ifp; 3266 uint32_t misccfg; 3267 void * kva; 3268 u_char eaddr[ETHER_ADDR_LEN]; 3269 pcireg_t memtype, subid, reg; 3270 bus_addr_t memaddr; 3271 uint32_t pm_ctl; 3272 bool no_seeprom; 3273 int capmask; 3274 3275 bp = bge_lookup(pa); 3276 KASSERT(bp != NULL); 3277 3278 sc->sc_pc = pa->pa_pc; 3279 sc->sc_pcitag = pa->pa_tag; 3280 sc->bge_dev = self; 3281 3282 sc->bge_pa = *pa; 3283 pc = sc->sc_pc; 3284 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 3285 3286 aprint_naive(": Ethernet controller\n"); 3287 aprint_normal(": %s\n", bp->bp_name); 3288 3289 /* 3290 * Map control/status registers. 3291 */ 3292 DPRINTFN(5, ("Map control/status regs\n")); 3293 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3294 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 3295 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 3296 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 3297 3298 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 3299 aprint_error_dev(sc->bge_dev, 3300 "failed to enable memory mapping!\n"); 3301 return; 3302 } 3303 3304 DPRINTFN(5, ("pci_mem_find\n")); 3305 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 3306 switch (memtype) { 3307 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 3308 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 3309 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 3310 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 3311 &memaddr, &sc->bge_bsize) == 0) 3312 break; 3313 default: 3314 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 3315 return; 3316 } 3317 3318 DPRINTFN(5, ("pci_intr_map\n")); 3319 if (pci_intr_map(pa, &ih)) { 3320 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 3321 return; 3322 } 3323 3324 DPRINTFN(5, ("pci_intr_string\n")); 3325 intrstr = pci_intr_string(pc, ih); 3326 3327 DPRINTFN(5, ("pci_intr_establish\n")); 3328 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 3329 3330 if (sc->bge_intrhand == NULL) { 3331 aprint_error_dev(sc->bge_dev, 3332 "couldn't establish interrupt%s%s\n", 3333 intrstr ? " at " : "", intrstr ? intrstr : ""); 3334 return; 3335 } 3336 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 3337 3338 /* Save various chip information. 
*/
3339 sc->bge_chipid = bge_chipid(pa);
3340 sc->bge_phy_addr = bge_phy_addr(sc);
3341
3342 if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
3343 &sc->bge_pciecap, NULL) != 0)
3344 || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) {
3345 /* PCIe */
3346 sc->bge_flags |= BGE_PCIE;
3347 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5719 ||
3348 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
3349 sc->bge_expmrq = 2048;
3350 else
3351 sc->bge_expmrq = 4096;
3352 bge_set_max_readrq(sc);
3353 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
3354 BGE_PCISTATE_PCI_BUSMODE) == 0) {
3355 /* PCI-X */
3356 sc->bge_flags |= BGE_PCIX;
3357 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
3358 &sc->bge_pcixcap, NULL) == 0)
3359 aprint_error_dev(sc->bge_dev,
3360 "unable to find PCIX capability\n");
3361 }
3362
3363 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) {
3364 /*
3365 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
3366 * can clobber the chip's PCI config-space power control
3367 * registers, leaving the card in D3 powersave state. We do
3368 * not have memory-mapped registers in this state, so force
3369 * device into D0 state before starting initialization.
3370 */
3371 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
3372 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
3373 pm_ctl |= (1 << 8) | PCI_PWR_D0; /* D0 state */
3374 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
3375 DELAY(1000); /* 27 usec is allegedly sufficient */
3376 }
3377
3378 /* Save chipset family. */
3379 switch (BGE_ASICREV(sc->bge_chipid)) {
3380 case BGE_ASICREV_BCM57765:
3381 case BGE_ASICREV_BCM57766:
3382 sc->bge_flags |= BGE_57765_PLUS;
3383 /* FALLTHROUGH */
3384 case BGE_ASICREV_BCM5717:
3385 case BGE_ASICREV_BCM5719:
3386 case BGE_ASICREV_BCM5720:
3387 sc->bge_flags |= BGE_5717_PLUS | BGE_5755_PLUS | BGE_575X_PLUS |
3388 BGE_5705_PLUS;
3389 break;
3390 case BGE_ASICREV_BCM5755:
3391 case BGE_ASICREV_BCM5761:
3392 case BGE_ASICREV_BCM5784:
3393 case BGE_ASICREV_BCM5785:
3394 case BGE_ASICREV_BCM5787:
3395 case BGE_ASICREV_BCM57780:
3396 sc->bge_flags |= BGE_5755_PLUS | BGE_575X_PLUS | BGE_5705_PLUS;
3397 break;
3398 case BGE_ASICREV_BCM5700:
3399 case BGE_ASICREV_BCM5701:
3400 case BGE_ASICREV_BCM5703:
3401 case BGE_ASICREV_BCM5704:
3402 sc->bge_flags |= BGE_5700_FAMILY | BGE_JUMBO_CAPABLE;
3403 break;
3404 case BGE_ASICREV_BCM5714_A0:
3405 case BGE_ASICREV_BCM5780:
3406 case BGE_ASICREV_BCM5714:
3407 sc->bge_flags |= BGE_5714_FAMILY;
3408 /* FALLTHROUGH */
3409 case BGE_ASICREV_BCM5750:
3410 case BGE_ASICREV_BCM5752:
3411 case BGE_ASICREV_BCM5906:
3412 sc->bge_flags |= BGE_575X_PLUS;
3413 /* FALLTHROUGH */
3414 case BGE_ASICREV_BCM5705:
3415 sc->bge_flags |= BGE_5705_PLUS;
3416 break;
3417 }
3418
3419 /* Identify chips with APE processor. */
3420 switch (BGE_ASICREV(sc->bge_chipid)) {
3421 case BGE_ASICREV_BCM5717:
3422 case BGE_ASICREV_BCM5719:
3423 case BGE_ASICREV_BCM5720:
3424 case BGE_ASICREV_BCM5761:
3425 sc->bge_flags |= BGE_APE;
3426 break;
3427 }
3428
3429 /* Chips with APE need BAR2 access for APE registers/memory.
*/
3430 if ((sc->bge_flags & BGE_APE) != 0) {
3431 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR2);
3432 if (pci_mapreg_map(pa, BGE_PCI_BAR2, memtype, 0,
3433 &sc->bge_apetag, &sc->bge_apehandle, NULL,
3434 &sc->bge_apesize)) {
3435 aprint_error_dev(sc->bge_dev,
3436 "couldn't map BAR2 memory\n");
3437 return;
3438 }
3439
3440 /* Enable APE register/memory access by host driver. */
3441 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
3442 reg |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3443 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3444 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3445 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE, reg);
3446
3447 bge_ape_lock_init(sc);
3448 bge_ape_read_fw_ver(sc);
3449 }
3450
3451 /* Identify the chips that use a CPMU. */
3452 if (BGE_IS_5717_PLUS(sc) ||
3453 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
3454 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
3455 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
3456 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
3457 sc->bge_flags |= BGE_CPMU_PRESENT;
3458
3459 if ((sc->bge_flags & BGE_CPMU_PRESENT) != 0)
3460 CSR_WRITE_4(sc, BGE_MI_MODE, BGE_MIMODE_500KHZ_CONST);
3461 else
3462 CSR_WRITE_4(sc, BGE_MI_MODE, BGE_MIMODE_BASE);
3463
3464 /*
3465 * When using the BCM5701 in PCI-X mode, data corruption has
3466 * been observed in the first few bytes of some received packets.
3467 * Aligning the packet buffer in memory eliminates the corruption.
3468 * Unfortunately, this misaligns the packet payloads. On platforms
3469 * which do not support unaligned accesses, we will realign the
3470 * payloads by copying the received packets.
3471 */
3472 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
3473 sc->bge_flags & BGE_PCIX)
3474 sc->bge_flags |= BGE_RX_ALIGNBUG;
3475
3476 if (BGE_IS_5700_FAMILY(sc))
3477 sc->bge_flags |= BGE_JUMBO_CAPABLE;
3478
3479 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
3480 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
3481
3482 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
3483 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3484 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
3485 sc->bge_flags |= BGE_IS_5788;
3486
3487 /*
3488 * Some controllers seem to require special firmware to use
3489 * TSO. But that firmware is not available to FreeBSD, and Linux
3490 * claims that TSO performed by the firmware is slower than
3491 * hardware-based TSO. Moreover, the firmware-based TSO has a
3492 * known bug: it can't handle TSO if the ethernet header plus
3493 * IP/TCP header is greater than 80 bytes. A workaround for the
3494 * bug exists, but it seems more expensive than simply not
3495 * using TSO at all. Some hardware also has the TSO bug, so
3496 * limit TSO to the controllers that are not affected by it
3497 * (e.g. 5755 or higher).
3498 */
3499 if (BGE_IS_5755_PLUS(sc)) {
3500 /*
3501 * BCM5754 and BCM5787 share the same ASIC ID, so an
3502 * explicit device ID check is required.
3503 */ 3504 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 3505 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 3506 sc->bge_flags |= BGE_TSO; 3507 } 3508 3509 capmask = 0xffffffff; /* XXX BMSR_DEFCAPMASK */ 3510 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 3511 (misccfg == 0x4000 || misccfg == 0x8000)) || 3512 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3513 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3514 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 3515 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 3516 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 3517 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 3518 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 3519 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 3520 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 3521 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 3522 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 3523 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795 || 3524 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3525 capmask &= ~BMSR_EXTSTAT; 3526 sc->bge_flags |= BGE_PHY_NO_WIRESPEED; 3527 } 3528 3529 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3530 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 3531 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 3532 sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) 3533 sc->bge_flags |= BGE_PHY_NO_WIRESPEED; 3534 3535 /* Set various PHY bug flags. */ 3536 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 3537 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 3538 sc->bge_flags |= BGE_PHY_CRC_BUG; 3539 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 3540 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 3541 sc->bge_flags |= BGE_PHY_ADC_BUG; 3542 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 3543 sc->bge_flags |= BGE_PHY_5704_A0_BUG; 3544 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3545 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 3546 PCI_VENDOR(subid) == PCI_VENDOR_DELL) 3547 sc->bge_flags |= BGE_PHY_NO_3LED; 3548 if (BGE_IS_5705_PLUS(sc) && 3549 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 3550 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3551 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780 && 3552 !BGE_IS_5717_PLUS(sc)) { 3553 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 3554 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 3555 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 3556 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 3557 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 3558 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 3559 sc->bge_flags |= BGE_PHY_JITTER_BUG; 3560 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 3561 sc->bge_flags |= BGE_PHY_ADJUST_TRIM; 3562 } else 3563 sc->bge_flags |= BGE_PHY_BER_BUG; 3564 } 3565 3566 /* 3567 * SEEPROM check. 3568 * First check if firmware knows we do not have SEEPROM. 
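 * (Machine-dependent code may set the "without-seeprom" device
 * property to mark such devices.)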
3569 */ 3570 if (prop_dictionary_get_bool(device_properties(self), 3571 "without-seeprom", &no_seeprom) && no_seeprom) 3572 sc->bge_flags |= BGE_NO_EEPROM; 3573 3574 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 3575 sc->bge_flags |= BGE_NO_EEPROM; 3576 3577 /* Now check the 'ROM failed' bit on the RX CPU */ 3578 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 3579 sc->bge_flags |= BGE_NO_EEPROM; 3580 3581 sc->bge_asf_mode = 0; 3582 /* No ASF if APE present. */ 3583 if ((sc->bge_flags & BGE_APE) == 0) { 3584 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == 3585 BGE_SRAM_DATA_SIG_MAGIC)) { 3586 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & 3587 BGE_HWCFG_ASF) { 3588 sc->bge_asf_mode |= ASF_ENABLE; 3589 sc->bge_asf_mode |= ASF_STACKUP; 3590 if (BGE_IS_575X_PLUS(sc)) 3591 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 3592 } 3593 } 3594 } 3595 3596 #if 0 3597 /* 3598 * Reset NVRAM before bge_reset(). It's required to acquire NVRAM 3599 * lock in bge_reset(). 3600 */ 3601 CSR_WRITE_4(sc, BGE_EE_ADDR, 3602 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 3603 delay(1000); 3604 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 3605 #endif 3606 3607 bge_stop_fw(sc); 3608 bge_sig_pre_reset(sc, BGE_RESET_START); 3609 if (bge_reset(sc)) 3610 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 3611 3612 bge_sig_legacy(sc, BGE_RESET_START); 3613 bge_sig_post_reset(sc, BGE_RESET_START); 3614 3615 if (bge_chipinit(sc)) { 3616 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 3617 bge_release_resources(sc); 3618 return; 3619 } 3620 3621 /* 3622 * Get station address from the EEPROM. 3623 */ 3624 if (bge_get_eaddr(sc, eaddr)) { 3625 aprint_error_dev(sc->bge_dev, 3626 "failed to read station address\n"); 3627 bge_release_resources(sc); 3628 return; 3629 } 3630 3631 br = bge_lookup_rev(sc->bge_chipid); 3632 3633 if (br == NULL) { 3634 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 3635 sc->bge_chipid); 3636 } else { 3637 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 3638 br->br_name, sc->bge_chipid); 3639 } 3640 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 3641 3642 /* Allocate the general information block and ring buffers. 
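 * All of the rings plus the status and statistics blocks live in one
 * struct bge_ring_data backed by a single DMA segment; each of the
 * allocate/map/create/load steps below unwinds its predecessors on
 * failure.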
*/ 3643 if (pci_dma64_available(pa)) 3644 sc->bge_dmatag = pa->pa_dmat64; 3645 else 3646 sc->bge_dmatag = pa->pa_dmat; 3647 DPRINTFN(5, ("bus_dmamem_alloc\n")); 3648 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 3649 PAGE_SIZE, 0, &sc->bge_ring_seg, 1, 3650 &sc->bge_ring_rseg, BUS_DMA_NOWAIT)) { 3651 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 3652 return; 3653 } 3654 DPRINTFN(5, ("bus_dmamem_map\n")); 3655 if (bus_dmamem_map(sc->bge_dmatag, &sc->bge_ring_seg, 3656 sc->bge_ring_rseg, sizeof(struct bge_ring_data), &kva, 3657 BUS_DMA_NOWAIT)) { 3658 aprint_error_dev(sc->bge_dev, 3659 "can't map DMA buffers (%zu bytes)\n", 3660 sizeof(struct bge_ring_data)); 3661 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3662 sc->bge_ring_rseg); 3663 return; 3664 } 3665 DPRINTFN(5, ("bus_dmamem_create\n")); 3666 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 3667 sizeof(struct bge_ring_data), 0, 3668 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 3669 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 3670 bus_dmamem_unmap(sc->bge_dmatag, kva, 3671 sizeof(struct bge_ring_data)); 3672 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3673 sc->bge_ring_rseg); 3674 return; 3675 } 3676 DPRINTFN(5, ("bus_dmamem_load\n")); 3677 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 3678 sizeof(struct bge_ring_data), NULL, 3679 BUS_DMA_NOWAIT)) { 3680 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3681 bus_dmamem_unmap(sc->bge_dmatag, kva, 3682 sizeof(struct bge_ring_data)); 3683 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, 3684 sc->bge_ring_rseg); 3685 return; 3686 } 3687 3688 DPRINTFN(5, ("bzero\n")); 3689 sc->bge_rdata = (struct bge_ring_data *)kva; 3690 3691 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 3692 3693 /* Try to allocate memory for jumbo buffers. */ 3694 if (BGE_IS_JUMBO_CAPABLE(sc)) { 3695 if (bge_alloc_jumbo_mem(sc)) { 3696 aprint_error_dev(sc->bge_dev, 3697 "jumbo buffer allocation failed\n"); 3698 } else 3699 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3700 } 3701 3702 /* Set default tuneable values. 
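 * The interrupt-coalescing defaults below trade interrupt rate
 * against latency: the chip coalesces until either the tick limit or
 * the buffered-descriptor limit is reached. The rx values can be
 * retuned at run time; bge_intr() applies pending changes via
 * sc->bge_pending_rxintr_change.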
*/ 3703 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 3704 sc->bge_rx_coal_ticks = 150; 3705 sc->bge_rx_max_coal_bds = 64; 3706 sc->bge_tx_coal_ticks = 300; 3707 sc->bge_tx_max_coal_bds = 400; 3708 if (BGE_IS_5705_PLUS(sc)) { 3709 sc->bge_tx_coal_ticks = (12 * 5); 3710 sc->bge_tx_max_coal_bds = (12 * 5); 3711 aprint_verbose_dev(sc->bge_dev, 3712 "setting short Tx thresholds\n"); 3713 } 3714 3715 if (BGE_IS_5717_PLUS(sc)) 3716 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3717 else if (BGE_IS_5705_PLUS(sc)) 3718 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 3719 else 3720 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 3721 3722 /* Set up ifnet structure */ 3723 ifp = &sc->ethercom.ec_if; 3724 ifp->if_softc = sc; 3725 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3726 ifp->if_ioctl = bge_ioctl; 3727 ifp->if_stop = bge_stop; 3728 ifp->if_start = bge_start; 3729 ifp->if_init = bge_init; 3730 ifp->if_watchdog = bge_watchdog; 3731 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 3732 IFQ_SET_READY(&ifp->if_snd); 3733 DPRINTFN(5, ("strcpy if_xname\n")); 3734 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 3735 3736 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 3737 sc->ethercom.ec_if.if_capabilities |= 3738 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 3739 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 3740 sc->ethercom.ec_if.if_capabilities |= 3741 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3742 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 3743 #endif 3744 sc->ethercom.ec_capabilities |= 3745 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 3746 3747 if (sc->bge_flags & BGE_TSO) 3748 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 3749 3750 /* 3751 * Do MII setup. 3752 */ 3753 DPRINTFN(5, ("mii setup\n")); 3754 sc->bge_mii.mii_ifp = ifp; 3755 sc->bge_mii.mii_readreg = bge_miibus_readreg; 3756 sc->bge_mii.mii_writereg = bge_miibus_writereg; 3757 sc->bge_mii.mii_statchg = bge_miibus_statchg; 3758 3759 /* 3760 * Figure out what sort of media we have by checking the hardware 3761 * config word in the first 32k of NIC internal memory, or fall back to 3762 * the config word in the EEPROM. Note: on some BCM5700 cards, 3763 * this value appears to be unset. If that's the case, we have to rely 3764 * on identifying the NIC by its PCI subsystem ID, as we do below for 3765 * the SysKonnect SK-9D41. 3766 */ 3767 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) { 3768 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); 3769 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 3770 bge_read_eeprom(sc, (void *)&hwcfg, 3771 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 3772 hwcfg = be32toh(hwcfg); 3773 } 3774 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
*/ 3775 if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 || 3776 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 3777 if (BGE_IS_5714_FAMILY(sc)) 3778 sc->bge_flags |= BGE_PHY_FIBER_MII; 3779 else 3780 sc->bge_flags |= BGE_PHY_FIBER_TBI; 3781 } 3782 3783 /* set phyflags and chipid before mii_attach() */ 3784 dict = device_properties(self); 3785 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags); 3786 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid); 3787 3788 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 3789 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 3790 bge_ifmedia_sts); 3791 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL); 3792 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX, 3793 0, NULL); 3794 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 3795 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 3796 /* Pretend the user requested this setting */ 3797 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 3798 } else { 3799 /* 3800 * Do transceiver setup and tell the firmware the 3801 * driver is down so we can try to get access the 3802 * probe if ASF is running. Retry a couple of times 3803 * if we get a conflict with the ASF firmware accessing 3804 * the PHY. 3805 */ 3806 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3807 bge_asf_driver_up(sc); 3808 3809 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 3810 bge_ifmedia_sts); 3811 mii_attach(sc->bge_dev, &sc->bge_mii, capmask, 3812 sc->bge_phy_addr, MII_OFFSET_ANY, 3813 MIIF_DOPAUSE); 3814 3815 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) { 3816 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 3817 ifmedia_add(&sc->bge_mii.mii_media, 3818 IFM_ETHER|IFM_MANUAL, 0, NULL); 3819 ifmedia_set(&sc->bge_mii.mii_media, 3820 IFM_ETHER|IFM_MANUAL); 3821 } else 3822 ifmedia_set(&sc->bge_mii.mii_media, 3823 IFM_ETHER|IFM_AUTO); 3824 3825 /* 3826 * Now tell the firmware we are going up after probing the PHY 3827 */ 3828 if (sc->bge_asf_mode & ASF_STACKUP) 3829 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3830 } 3831 3832 /* 3833 * Call MI attach routine. 3834 */ 3835 DPRINTFN(5, ("if_attach\n")); 3836 if_attach(ifp); 3837 DPRINTFN(5, ("ether_ifattach\n")); 3838 ether_ifattach(ifp, eaddr); 3839 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb); 3840 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 3841 RND_TYPE_NET, 0); 3842 #ifdef BGE_EVENT_COUNTERS 3843 /* 3844 * Attach event counters. 
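 * (one interrupt counter, plus MAC flow-control counters that are
 * refreshed from the chip's statistics block by bge_stats_update()).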
3845 */ 3846 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3847 NULL, device_xname(sc->bge_dev), "intr"); 3848 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3849 NULL, device_xname(sc->bge_dev), "tx_xoff"); 3850 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 3851 NULL, device_xname(sc->bge_dev), "tx_xon"); 3852 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 3853 NULL, device_xname(sc->bge_dev), "rx_xoff"); 3854 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 3855 NULL, device_xname(sc->bge_dev), "rx_xon"); 3856 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 3857 NULL, device_xname(sc->bge_dev), "rx_macctl"); 3858 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 3859 NULL, device_xname(sc->bge_dev), "xoffentered"); 3860 #endif /* BGE_EVENT_COUNTERS */ 3861 DPRINTFN(5, ("callout_init\n")); 3862 callout_init(&sc->bge_timeout, 0); 3863 3864 if (pmf_device_register(self, NULL, NULL)) 3865 pmf_class_network_register(self, ifp); 3866 else 3867 aprint_error_dev(self, "couldn't establish power handler\n"); 3868 3869 bge_sysctl_init(sc); 3870 3871 #ifdef BGE_DEBUG 3872 bge_debug_info(sc); 3873 #endif 3874 } 3875 3876 /* 3877 * Stop all chip I/O so that the kernel's probe routines don't 3878 * get confused by errant DMAs when rebooting. 3879 */ 3880 static int 3881 bge_detach(device_t self, int flags __unused) 3882 { 3883 struct bge_softc *sc = device_private(self); 3884 struct ifnet *ifp = &sc->ethercom.ec_if; 3885 int s; 3886 3887 s = splnet(); 3888 /* Stop the interface. Callouts are stopped in it. */ 3889 bge_stop(ifp, 1); 3890 splx(s); 3891 3892 mii_detach(&sc->bge_mii, MII_PHY_ANY, MII_OFFSET_ANY); 3893 3894 /* Delete all remaining media. */ 3895 ifmedia_delete_instance(&sc->bge_mii.mii_media, IFM_INST_ANY); 3896 3897 ether_ifdetach(ifp); 3898 if_detach(ifp); 3899 3900 bge_release_resources(sc); 3901 3902 return 0; 3903 } 3904 3905 static void 3906 bge_release_resources(struct bge_softc *sc) 3907 { 3908 3909 /* Disestablish the interrupt handler */ 3910 if (sc->bge_intrhand != NULL) { 3911 pci_intr_disestablish(sc->sc_pc, sc->bge_intrhand); 3912 sc->bge_intrhand = NULL; 3913 } 3914 3915 bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map); 3916 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 3917 bus_dmamem_unmap(sc->bge_dmatag, (void *)sc->bge_rdata, 3918 sizeof(struct bge_ring_data)); 3919 bus_dmamem_free(sc->bge_dmatag, &sc->bge_ring_seg, sc->bge_ring_rseg); 3920 3921 /* Unmap the device registers */ 3922 if (sc->bge_bsize != 0) { 3923 bus_space_unmap(sc->bge_btag, sc->bge_bhandle, sc->bge_bsize); 3924 sc->bge_bsize = 0; 3925 } 3926 3927 /* Unmap the APE registers */ 3928 if (sc->bge_apesize != 0) { 3929 bus_space_unmap(sc->bge_apetag, sc->bge_apehandle, 3930 sc->bge_apesize); 3931 sc->bge_apesize = 0; 3932 } 3933 } 3934 3935 static int 3936 bge_reset(struct bge_softc *sc) 3937 { 3938 uint32_t cachesize, command; 3939 uint32_t reset, mac_mode, mac_mode_mask; 3940 pcireg_t devctl, reg; 3941 int i, val; 3942 void (*write_op)(struct bge_softc *, int, int); 3943 3944 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; 3945 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 3946 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; 3947 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; 3948 3949 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 3950 (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 3951 if (sc->bge_flags & BGE_PCIE) 3952 write_op = bge_writemem_direct; 3953 
else
3954 		write_op = bge_writemem_ind;
3955 	} else
3956 		write_op = bge_writereg_ind;
3957
3958 #if 0
3959 	/* 57XX step 4 */
3960 	/* Acquire the NVM lock */
3961 	if ((sc->bge_flags & BGE_NO_EEPROM) == 0 &&
3962 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5700 &&
3963 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5701) {
3964 		CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
3965 		for (i = 0; i < 8000; i++) {
3966 			if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
3967 			    BGE_NVRAMSWARB_GNT1)
3968 				break;
3969 			DELAY(20);
3970 		}
3971 		if (i == 8000) {
3972 			printf("%s: NVRAM lock timed out!\n",
3973 			    device_xname(sc->bge_dev));
3974 		}
3975 	}
3976 #endif
3977 	/* Take APE lock when performing reset. */
3978 	bge_ape_lock(sc, BGE_APE_LOCK_GRC);
3979
3980 	/* 57XX step 3 */
3981 	/* Save some important PCI state. */
3982 	cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
3983 	/* 5718 reset step 3 */
3984 	command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
3985
3986 	/* 5718 reset step 5, 57XX step 5b-5d */
3987 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
3988 	    BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3989 	    BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW);
3990
3991 	/* XXX ???: Disable fastboot on controllers that support it. */
3992 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
3993 	    BGE_IS_5755_PLUS(sc))
3994 		CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
3995
3996 	/* 5718 reset step 2, 57XX step 6 */
3997 	/*
3998 	 * Write the magic number to SRAM at offset 0xB50.
3999 	 * When firmware finishes its initialization it will
4000 	 * write ~BGE_MAGIC_NUMBER to the same location.
4001 	 */
4002 	bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
4003
4004 	/* 5718 reset step 6, 57XX step 7 */
4005 	reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
4006 	/*
4007 	 * XXX: from FreeBSD/Linux; no documentation
4008 	 */
4009 	if (sc->bge_flags & BGE_PCIE) {
4010 		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
4011 		    !BGE_IS_57765_PLUS(sc) &&
4012 		    (CSR_READ_4(sc, BGE_PHY_TEST_CTRL_REG) ==
4013 		    (BGE_PHY_PCIE_LTASS_MODE | BGE_PHY_PCIE_SCRAM_MODE))) {
4014 			/* PCI Express 1.0 system */
4015 			CSR_WRITE_4(sc, BGE_PHY_TEST_CTRL_REG,
4016 			    BGE_PHY_PCIE_SCRAM_MODE);
4017 		}
4018 		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
4019 			/*
4020 			 * Prevent PCI Express link training
4021 			 * during global reset.
4022 			 */
4023 			CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
4024 			reset |= (1 << 29);
4025 		}
4026 	}
4027
4028 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
4029 		i = CSR_READ_4(sc, BGE_VCPU_STATUS);
4030 		CSR_WRITE_4(sc, BGE_VCPU_STATUS,
4031 		    i | BGE_VCPU_STATUS_DRV_RESET);
4032 		i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
4033 		CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
4034 		    i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
4035 	}
4036
4037 	/*
4038 	 * Set GPHY Power Down Override to leave GPHY
4039 	 * powered up in D0 uninitialized.
4040 */ 4041 if (BGE_IS_5705_PLUS(sc) && 4042 (sc->bge_flags & BGE_CPMU_PRESENT) == 0) 4043 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 4044 4045 /* Issue global reset */ 4046 write_op(sc, BGE_MISC_CFG, reset); 4047 4048 /* 5718 reset step 7, 57XX step 8 */ 4049 if (sc->bge_flags & BGE_PCIE) 4050 delay(100*1000); /* too big */ 4051 else 4052 delay(1000); 4053 4054 if (sc->bge_flags & BGE_PCIE) { 4055 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 4056 DELAY(500000); 4057 /* XXX: Magic Numbers */ 4058 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4059 BGE_PCI_UNKNOWN0); 4060 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4061 BGE_PCI_UNKNOWN0, 4062 reg | (1 << 15)); 4063 } 4064 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4065 sc->bge_pciecap + PCI_PCIE_DCSR); 4066 /* Clear enable no snoop and disable relaxed ordering. */ 4067 devctl &= ~(PCI_PCIE_DCSR_ENA_RELAX_ORD | 4068 PCI_PCIE_DCSR_ENA_NO_SNOOP); 4069 4070 /* Set PCIE max payload size to 128 for older PCIe devices */ 4071 if ((sc->bge_flags & BGE_CPMU_PRESENT) == 0) 4072 devctl &= ~(0x00e0); 4073 /* Clear device status register. Write 1b to clear */ 4074 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED 4075 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED; 4076 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4077 sc->bge_pciecap + PCI_PCIE_DCSR, devctl); 4078 bge_set_max_readrq(sc); 4079 } 4080 4081 /* From Linux: dummy read to flush PCI posted writes */ 4082 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 4083 4084 /* 4085 * Reset some of the PCI state that got zapped by reset 4086 * To modify the PCISTATE register, BGE_PCIMISCCTL_PCISTATE_RW must be 4087 * set, too. 4088 */ 4089 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 4090 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 4091 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 4092 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; 4093 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && 4094 (sc->bge_flags & BGE_PCIX) != 0) 4095 val |= BGE_PCISTATE_RETRY_SAME_DMA; 4096 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) 4097 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | 4098 BGE_PCISTATE_ALLOW_APE_SHMEM_WR | 4099 BGE_PCISTATE_ALLOW_APE_PSPACE_WR; 4100 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE, val); 4101 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 4102 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 4103 4104 /* Step 11: disable PCI-X Relaxed Ordering. */ 4105 if (sc->bge_flags & BGE_PCIX) { 4106 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4107 + PCI_PCIX_CMD); 4108 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 4109 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER); 4110 } 4111 4112 /* 5718 reset step 10, 57XX step 12 */ 4113 /* Enable memory arbiter. 
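 * Note: the arbiter apparently must be re-enabled after reset before
 * any further access to on-chip memory; on the 5714 family the other
 * BGE_MARB_MODE bits are preserved by ORing in the enable bit.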
*/
4114 	if (BGE_IS_5714_FAMILY(sc)) {
4115 		val = CSR_READ_4(sc, BGE_MARB_MODE);
4116 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
4117 	} else
4118 		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4119
4120 	/* XXX 5721, 5751 and 5752 */
4121 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
4122 		/* Step 19: */
4123 		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
4124 		/* Step 20: */
4125 		BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
4126 	}
4127
4128 	/* 5718 reset step 12, 57XX step 15 and 16 */
4129 	/* Fix up byte swapping */
4130 	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
4131
4132 	/* 5718 reset step 13, 57XX step 17 */
4133 	/*
4134 	 * Wait for the bootcode to complete initialization.
4135 	 * See BCM5718 programmer's guide's "step 13, Device reset Procedure,
4136 	 * Section 7". For 57XX, it's optional.
4137 	 */
4138 	if (BGE_IS_5717_PLUS(sc)) {
4139 		for (i = 0; i < 1000*1000; i++) {
4140 			val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
4141 			if (val == BGE_SRAM_FW_MB_RESET_MAGIC)
4142 				break;
4143 			DELAY(10);
4144 		}
4145 	}
4146
4147 	/* 57XX step 21 */
4148 	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
4149 		pcireg_t msidata;
4150
4151 		msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4152 		    BGE_PCI_MSI_DATA);
4153 		msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
4154 		pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
4155 		    msidata);
4156 	}
4157
4158 	/* 57XX step 18 */
4159 	/* Write MAC mode.
4160 	 * XXX Write 0x0c for 5703S and 5704S
4161 	 */
4162 	val = CSR_READ_4(sc, BGE_MAC_MODE);
4163 	val = (val & ~mac_mode_mask) | mac_mode;
4164 	CSR_WRITE_4_FLUSH(sc, BGE_MAC_MODE, val);
4165 	DELAY(40);
4166
4167 	bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
4168
4169 	/* 57XX step 17 */
4170 	/* Poll until the firmware initialization is complete */
4171 	bge_poll_fw(sc);
4172
4173 	/*
4174 	 * The 5704 in TBI mode apparently needs some special
4175 	 * adjustment to ensure the SERDES drive level is set
4176 	 * to 1.2V.
4177 	 */
4178 	if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
4179 	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4180 		uint32_t serdescfg;
4181
4182 		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
4183 		serdescfg = (serdescfg & ~0xFFF) | 0x880;
4184 		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
4185 	}
4186
4187 	if (sc->bge_flags & BGE_PCIE &&
4188 	    !BGE_IS_57765_PLUS(sc) &&
4189 	    sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
4190 	    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785) {
4191 		uint32_t v;
4192
4193 		/* Enable PCI Express bug fix */
4194 		v = CSR_READ_4(sc, BGE_TLP_CONTROL_REG);
4195 		CSR_WRITE_4(sc, BGE_TLP_CONTROL_REG,
4196 		    v | BGE_TLP_DATA_FIFO_PROTECT);
4197 	}
4198
4199 	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720)
4200 		BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
4201 		    CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
4202
4203 	return 0;
4204 }
4205
4206 /*
4207  * Frame reception handling. This is called if there's a frame
4208  * on the receive return list.
4209 * 4210 * Note: we have to be able to handle two possibilities here: 4211 * 1) the frame is from the jumbo receive ring 4212 * 2) the frame is from the standard receive ring 4213 */ 4214 4215 static void 4216 bge_rxeof(struct bge_softc *sc) 4217 { 4218 struct ifnet *ifp; 4219 uint16_t rx_prod, rx_cons; 4220 int stdcnt = 0, jumbocnt = 0; 4221 bus_dmamap_t dmamap; 4222 bus_addr_t offset, toff; 4223 bus_size_t tlen; 4224 int tosync; 4225 4226 rx_cons = sc->bge_rx_saved_considx; 4227 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 4228 4229 /* Nothing to do */ 4230 if (rx_cons == rx_prod) 4231 return; 4232 4233 ifp = &sc->ethercom.ec_if; 4234 4235 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4236 offsetof(struct bge_ring_data, bge_status_block), 4237 sizeof (struct bge_status_block), 4238 BUS_DMASYNC_POSTREAD); 4239 4240 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 4241 tosync = rx_prod - rx_cons; 4242 4243 if (tosync != 0) 4244 rnd_add_uint32(&sc->rnd_source, tosync); 4245 4246 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 4247 4248 if (tosync < 0) { 4249 tlen = (sc->bge_return_ring_cnt - rx_cons) * 4250 sizeof (struct bge_rx_bd); 4251 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4252 toff, tlen, BUS_DMASYNC_POSTREAD); 4253 tosync = -tosync; 4254 } 4255 4256 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 4257 offset, tosync * sizeof (struct bge_rx_bd), 4258 BUS_DMASYNC_POSTREAD); 4259 4260 while (rx_cons != rx_prod) { 4261 struct bge_rx_bd *cur_rx; 4262 uint32_t rxidx; 4263 struct mbuf *m = NULL; 4264 4265 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 4266 4267 rxidx = cur_rx->bge_idx; 4268 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 4269 4270 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 4271 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 4272 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 4273 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 4274 jumbocnt++; 4275 bus_dmamap_sync(sc->bge_dmatag, 4276 sc->bge_cdata.bge_rx_jumbo_map, 4277 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 4278 BGE_JLEN, BUS_DMASYNC_POSTREAD); 4279 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4280 ifp->if_ierrors++; 4281 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4282 continue; 4283 } 4284 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 4285 NULL)== ENOBUFS) { 4286 ifp->if_ierrors++; 4287 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 4288 continue; 4289 } 4290 } else { 4291 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 4292 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 4293 4294 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 4295 stdcnt++; 4296 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 4297 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 4298 if (dmamap == NULL) { 4299 ifp->if_ierrors++; 4300 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4301 continue; 4302 } 4303 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 4304 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 4305 bus_dmamap_unload(sc->bge_dmatag, dmamap); 4306 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 4307 ifp->if_ierrors++; 4308 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4309 continue; 4310 } 4311 if (bge_newbuf_std(sc, sc->bge_std, 4312 NULL, dmamap) == ENOBUFS) { 4313 ifp->if_ierrors++; 4314 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 4315 continue; 4316 } 4317 } 4318 4319 ifp->if_ipackets++; 4320 #ifndef __NO_STRICT_ALIGNMENT 4321 /* 4322 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 4323 * the Rx buffer has the layer-2 header unaligned. 4324 * If our CPU requires alignment, re-align by copying. 
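 * (The memmove() below shifts the received frame up by ETHER_ALIGN
 * bytes so that the IP header ends up 4-byte aligned, at the cost of
 * one copy per packet.)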
4325 */ 4326 if (sc->bge_flags & BGE_RX_ALIGNBUG) { 4327 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 4328 cur_rx->bge_len); 4329 m->m_data += ETHER_ALIGN; 4330 } 4331 #endif 4332 4333 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 4334 m->m_pkthdr.rcvif = ifp; 4335 4336 /* 4337 * Handle BPF listeners. Let the BPF user see the packet. 4338 */ 4339 bpf_mtap(ifp, m); 4340 4341 bge_rxcsum(sc, cur_rx, m); 4342 4343 /* 4344 * If we received a packet with a vlan tag, pass it 4345 * to vlan_input() instead of ether_input(). 4346 */ 4347 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 4348 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 4349 } 4350 4351 (*ifp->if_input)(ifp, m); 4352 } 4353 4354 sc->bge_rx_saved_considx = rx_cons; 4355 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 4356 if (stdcnt) 4357 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 4358 if (jumbocnt) 4359 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 4360 } 4361 4362 static void 4363 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) 4364 { 4365 4366 if (BGE_IS_5717_PLUS(sc)) { 4367 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { 4368 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4369 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4370 if ((cur_rx->bge_error_flag & 4371 BGE_RXERRFLAG_IP_CSUM_NOK) != 0) 4372 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4373 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { 4374 m->m_pkthdr.csum_data = 4375 cur_rx->bge_tcp_udp_csum; 4376 m->m_pkthdr.csum_flags |= 4377 (M_CSUM_TCPv4|M_CSUM_UDPv4| 4378 M_CSUM_DATA); 4379 } 4380 } 4381 } else { 4382 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) != 0) 4383 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 4384 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 4385 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 4386 /* 4387 * Rx transport checksum-offload may also 4388 * have bugs with packets which, when transmitted, 4389 * were `runts' requiring padding. 
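 * As a precaution, the TCP/UDP checksum result is therefore trusted
 * only for frames of at least ETHER_MIN_NOPAD bytes.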
*/
4391 		if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4392 		    (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
4393 		     m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
4394 			m->m_pkthdr.csum_data =
4395 			    cur_rx->bge_tcp_udp_csum;
4396 			m->m_pkthdr.csum_flags |=
4397 			    (M_CSUM_TCPv4|M_CSUM_UDPv4|
4398 			     M_CSUM_DATA);
4399 		}
4400 	}
4401 }
4402
4403 static void
4404 bge_txeof(struct bge_softc *sc)
4405 {
4406 	struct bge_tx_bd *cur_tx = NULL;
4407 	struct ifnet *ifp;
4408 	struct txdmamap_pool_entry *dma;
4409 	bus_addr_t offset, toff;
4410 	bus_size_t tlen;
4411 	int tosync;
4412 	struct mbuf *m;
4413
4414 	ifp = &sc->ethercom.ec_if;
4415
4416 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4417 	    offsetof(struct bge_ring_data, bge_status_block),
4418 	    sizeof (struct bge_status_block),
4419 	    BUS_DMASYNC_POSTREAD);
4420
4421 	offset = offsetof(struct bge_ring_data, bge_tx_ring);
4422 	tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
4423 	    sc->bge_tx_saved_considx;
4424
4425 	if (tosync != 0)
4426 		rnd_add_uint32(&sc->rnd_source, tosync);
4427
4428 	toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
4429
4430 	if (tosync < 0) {
4431 		tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
4432 		    sizeof (struct bge_tx_bd);
4433 		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4434 		    toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4435 		tosync = -tosync;
4436 	}
4437
4438 	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
4439 	    offset, tosync * sizeof (struct bge_tx_bd),
4440 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4441
4442 	/*
4443 	 * Go through our tx ring and free mbufs for those
4444 	 * frames that have been sent.
4445 	 */
4446 	while (sc->bge_tx_saved_considx !=
4447 	    sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
4448 		uint32_t idx = 0;
4449
4450 		idx = sc->bge_tx_saved_considx;
4451 		cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
4452 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4453 			ifp->if_opackets++;
4454 		m = sc->bge_cdata.bge_tx_chain[idx];
4455 		if (m != NULL) {
4456 			sc->bge_cdata.bge_tx_chain[idx] = NULL;
4457 			dma = sc->txdma[idx];
4458 			bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
4459 			    dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4460 			bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
4461 			SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
4462 			sc->txdma[idx] = NULL;
4463
4464 			m_freem(m);
4465 		}
4466 		sc->bge_txcnt--;
4467 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4468 		ifp->if_timer = 0;
4469 	}
4470
4471 	if (cur_tx != NULL)
4472 		ifp->if_flags &= ~IFF_OACTIVE;
4473 }
4474
4475 static int
4476 bge_intr(void *xsc)
4477 {
4478 	struct bge_softc *sc;
4479 	struct ifnet *ifp;
4480 	uint32_t statusword;
4481
4482 	sc = xsc;
4483 	ifp = &sc->ethercom.ec_if;
4484
4485 	/* It is possible for the interrupt to arrive before
4486 	 * the status block has been updated.
4487 	 * Reading the PCI state register will confirm whether the
4488 	 * interrupt is ours and will flush the status block.
4489 	 */
4490
4491 	/* read status word from status block */
4492 	statusword = sc->bge_rdata->bge_status_block.bge_status;
4493
4494 	if ((statusword & BGE_STATFLAG_UPDATED) ||
4495 	    (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
4496 		/* Ack interrupt and stop others from occurring.
*/
4497 		bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);
4498
4499 		BGE_EVCNT_INCR(sc->bge_ev_intr);
4500
4501 		/* clear status word */
4502 		sc->bge_rdata->bge_status_block.bge_status = 0;
4503
4504 		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
4505 		    statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
4506 		    BGE_STS_BIT(sc, BGE_STS_LINK_EVT))
4507 			bge_link_upd(sc);
4508
4509 		if (ifp->if_flags & IFF_RUNNING) {
4510 			/* Check RX return ring producer/consumer */
4511 			bge_rxeof(sc);
4512
4513 			/* Check TX ring producer/consumer */
4514 			bge_txeof(sc);
4515 		}
4516
4517 		if (sc->bge_pending_rxintr_change) {
4518 			uint32_t rx_ticks = sc->bge_rx_coal_ticks;
4519 			uint32_t rx_bds = sc->bge_rx_max_coal_bds;
4520 			uint32_t junk;
4521
4522 			CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
4523 			DELAY(10);
4524 			junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
4525
4526 			CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
4527 			DELAY(10);
4528 			junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
4529
4530 			sc->bge_pending_rxintr_change = 0;
4531 		}
4532 		bge_handle_events(sc);
4533
4534 		/* Re-enable interrupts. */
4535 		bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0);
4536
4537 		if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
4538 			bge_start(ifp);
4539
4540 		return 1;
4541 	} else
4542 		return 0;
4543 }
4544
4545 static void
4546 bge_asf_driver_up(struct bge_softc *sc)
4547 {
4548 	if (sc->bge_asf_mode & ASF_STACKUP) {
4549 		/* Send ASF heartbeat approx. every 2s */
4550 		if (sc->bge_asf_count)
4551 			sc->bge_asf_count--;
4552 		else {
4553 			sc->bge_asf_count = 2;
4554
4555 			bge_wait_for_event_ack(sc);
4556
4557 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4558 			    BGE_FW_CMD_DRV_ALIVE);
4559 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4560 			bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4561 			    BGE_FW_HB_TIMEOUT_SEC);
4562 			CSR_WRITE_4_FLUSH(sc, BGE_RX_CPU_EVENT,
4563 			    CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4564 			    BGE_RX_CPU_DRV_EVENT);
4565 		}
4566 	}
4567 }
4568
4569 static void
4570 bge_tick(void *xsc)
4571 {
4572 	struct bge_softc *sc = xsc;
4573 	struct mii_data *mii = &sc->bge_mii;
4574 	int s;
4575
4576 	s = splnet();
4577
4578 	if (BGE_IS_5705_PLUS(sc))
4579 		bge_stats_update_regs(sc);
4580 	else
4581 		bge_stats_update(sc);
4582
4583 	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4584 		/*
4585 		 * Since auto-polling can't be used in TBI mode, we should
4586 		 * poll link status manually. Here we register a pending
4587 		 * link event and trigger an interrupt.
4588 		 */
4589 		BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4590 		BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4591 	} else {
4592 		/*
4593 		 * Do not touch PHY if we have link up. This could break
4594 		 * IPMI/ASF mode or produce extra input errors.
4595 		 * (extra input errors were reported for bcm5701 & bcm5704).
4596 */ 4597 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 4598 mii_tick(mii); 4599 } 4600 4601 bge_asf_driver_up(sc); 4602 4603 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 4604 4605 splx(s); 4606 } 4607 4608 static void 4609 bge_stats_update_regs(struct bge_softc *sc) 4610 { 4611 struct ifnet *ifp = &sc->ethercom.ec_if; 4612 4613 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 4614 offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 4615 4616 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 4617 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 4618 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 4619 } 4620 4621 static void 4622 bge_stats_update(struct bge_softc *sc) 4623 { 4624 struct ifnet *ifp = &sc->ethercom.ec_if; 4625 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 4626 4627 #define READ_STAT(sc, stats, stat) \ 4628 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 4629 4630 ifp->if_collisions += 4631 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 4632 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 4633 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 4634 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 4635 ifp->if_collisions; 4636 4637 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 4638 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 4639 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 4640 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 4641 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 4642 READ_STAT(sc, stats, 4643 xoffPauseFramesReceived.bge_addr_lo)); 4644 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 4645 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 4646 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 4647 READ_STAT(sc, stats, 4648 macControlFramesReceived.bge_addr_lo)); 4649 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 4650 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 4651 4652 #undef READ_STAT 4653 4654 #ifdef notdef 4655 ifp->if_collisions += 4656 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 4657 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 4658 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 4659 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 4660 ifp->if_collisions; 4661 #endif 4662 } 4663 4664 /* 4665 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 4666 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 4667 * but when such padded frames employ the bge IP/TCP checksum offload, 4668 * the hardware checksum assist gives incorrect results (possibly 4669 * from incorporating its own padding into the UDP/TCP checksum; who knows). 4670 * If we pad such runts with zeros, the onboard checksum comes out correct. 4671 */ 4672 static inline int 4673 bge_cksum_pad(struct mbuf *pkt) 4674 { 4675 struct mbuf *last = NULL; 4676 int padlen; 4677 4678 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 4679 4680 /* if there's only the packet-header and we can pad there, use it. */ 4681 if (pkt->m_pkthdr.len == pkt->m_len && 4682 M_TRAILINGSPACE(pkt) >= padlen) { 4683 last = pkt; 4684 } else { 4685 /* 4686 * Walk packet chain to find last mbuf. We will either 4687 * pad there, or append a new mbuf and pad it 4688 * (thus perhaps avoiding the bcm5700 dma-min bug). 4689 */ 4690 for (last = pkt; last->m_next != NULL; last = last->m_next) { 4691 continue; /* do nothing */ 4692 } 4693 4694 /* `last' now points to last in chain. 
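 * If the last mbuf has no trailing space for the pad bytes, a fresh
 * empty mbuf is appended to carry them.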
*/
4695 		if (M_TRAILINGSPACE(last) < padlen) {
4696 			/* Allocate new empty mbuf, pad it. Compact later. */
4697 			struct mbuf *n;
4698 			MGET(n, M_DONTWAIT, MT_DATA);
4699 			if (n == NULL)
4700 				return ENOBUFS;
4701 			n->m_len = 0;
4702 			last->m_next = n;
4703 			last = n;
4704 		}
4705 	}
4706
4707 	KDASSERT(!M_READONLY(last));
4708 	KDASSERT(M_TRAILINGSPACE(last) >= padlen);
4709
4710 	/* Now zero the pad area, to avoid the bge cksum-assist bug */
4711 	memset(mtod(last, char *) + last->m_len, 0, padlen);
4712 	last->m_len += padlen;
4713 	pkt->m_pkthdr.len += padlen;
4714 	return 0;
4715 }
4716
4717 /*
4718  * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
4719  */
4720 static inline int
4721 bge_compact_dma_runt(struct mbuf *pkt)
4722 {
4723 	struct mbuf *m, *prev;
4724 	int totlen, prevlen;
4725
4726 	prev = NULL;
4727 	totlen = 0;
4728 	prevlen = -1;
4729
4730 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
4731 		int mlen = m->m_len;
4732 		int shortfall = 8 - mlen;
4733
4734 		totlen += mlen;
4735 		if (mlen == 0)
4736 			continue;
4737 		if (mlen >= 8)
4738 			continue;
4739
4740 		/* If we get here, mbuf data is too small for DMA engine.
4741 		 * Try to fix by shuffling data to prev or next in chain.
4742 		 * If that fails, do a compacting deep-copy of the whole chain.
4743 		 */
4744
4745 		/* Internal frag. If fits in prev, copy it there. */
4746 		if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
4747 			memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
4748 			prev->m_len += mlen;
4749 			m->m_len = 0;
4750 			/* XXX stitch chain */
4751 			prev->m_next = m_free(m);
4752 			m = prev;
4753 			continue;
4754 		}
4755 		else if (m->m_next != NULL &&
4756 		    M_TRAILINGSPACE(m) >= shortfall &&
4757 		    m->m_next->m_len >= (8 + shortfall)) {
4758 			/* m is writable and has enough data in next, pull up. */
4759
4760 			memcpy(m->m_data + m->m_len, m->m_next->m_data,
4761 			    shortfall);
4762 			m->m_len += shortfall;
4763 			m->m_next->m_len -= shortfall;
4764 			m->m_next->m_data += shortfall;
4765 		}
4766 		else if (m->m_next == NULL || 1) {
4767 			/* Got a runt at the very end of the packet.
4768 			 * Borrow data from the tail of the preceding mbuf and
4769 			 * update its length in-place. (The original data is still
4770 			 * valid, so we can do this even if prev is not writable.)
4771 			 */
4772
4773 			/* if we'd make prev a runt, just move all of its data. */
4774 			KASSERT(prev != NULL /*, ("runt but null PREV")*/);
4775 			KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
4776
4777 			if ((prev->m_len - shortfall) < 8)
4778 				shortfall = prev->m_len;
4779
4780 #ifdef notyet	/* just do the safe slow thing for now */
4781 			if (!M_READONLY(m)) {
4782 				if (M_LEADINGSPACE(m) < shortfall) {
4783 					void *m_dat;
4784 					m_dat = (m->m_flags & M_PKTHDR) ?
m->m_pktdat : m->m_dat;
4786 					memmove(m_dat, mtod(m, void*), m->m_len);
4787 					m->m_data = m_dat;
4788 				}
4789 			} else
4790 #endif	/* just do the safe slow thing */
4791 			{
4792 				struct mbuf * n = NULL;
4793 				int newprevlen = prev->m_len - shortfall;
4794
4795 				MGET(n, M_NOWAIT, MT_DATA);
4796 				if (n == NULL)
4797 					return ENOBUFS;
4798 				KASSERT(m->m_len + shortfall < MLEN
4799 				    /*,
4800 				     ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
4801
4802 				/* first copy the data we're stealing from prev */
4803 				memcpy(n->m_data, prev->m_data + newprevlen,
4804 				    shortfall);
4805
4806 				/* update prev->m_len accordingly */
4807 				prev->m_len -= shortfall;
4808
4809 				/* copy data from runt m */
4810 				memcpy(n->m_data + shortfall, m->m_data,
4811 				    m->m_len);
4812
4813 				/* n holds what we stole from prev, plus m */
4814 				n->m_len = shortfall + m->m_len;
4815
4816 				/* stitch n into chain and free m */
4817 				n->m_next = m->m_next;
4818 				prev->m_next = n;
4819 				/* KASSERT(m->m_next == NULL); */
4820 				m->m_next = NULL;
4821 				m_free(m);
4822 				m = n;	/* for continuing loop */
4823 			}
4824 		}
4825 		prevlen = m->m_len;
4826 	}
4827 	return 0;
4828 }
4829
4830 /*
4831  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
4832  * pointers to descriptors.
4833  */
4834 static int
4835 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
4836 {
4837 	struct bge_tx_bd *f = NULL;
4838 	uint32_t frag, cur;
4839 	uint16_t csum_flags = 0;
4840 	uint16_t txbd_tso_flags = 0;
4841 	struct txdmamap_pool_entry *dma;
4842 	bus_dmamap_t dmamap;
4843 	int i = 0;
4844 	struct m_tag *mtag;
4845 	int use_tso, maxsegsize, error;
4846
4847 	cur = frag = *txidx;
4848
4849 	if (m_head->m_pkthdr.csum_flags) {
4850 		if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
4851 			csum_flags |= BGE_TXBDFLAG_IP_CSUM;
4852 		if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
4853 			csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
4854 	}
4855
4856 	/*
4857 	 * If we were asked to do an outboard checksum, and the NIC
4858 	 * has the bug where it sometimes adds in the Ethernet padding,
4859 	 * explicitly pad with zeros so the cksum will be correct either way.
4860 	 * (For now, do this for all chip versions, until newer
4861 	 * are confirmed to not require the workaround.)
4862 	 */
4863 	if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
4864 #ifdef notyet
4865 	    (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
4866 #endif
4867 	    m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
4868 		goto check_dma_bug;
4869
4870 	if (bge_cksum_pad(m_head) != 0)
4871 		return ENOBUFS;
4872
4873 check_dma_bug:
4874 	if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
4875 		goto doit;
4876
4877 	/*
4878 	 * bcm5700 Revision B silicon cannot handle DMA descriptors with
4879 	 * less than eight bytes. If we encounter a teeny mbuf
4880 	 * at the end of a chain, we can pad. Otherwise, copy.
4881 	 */
4882 	if (bge_compact_dma_runt(m_head) != 0)
4883 		return ENOBUFS;
4884
4885 doit:
4886 	dma = SLIST_FIRST(&sc->txdma_list);
4887 	if (dma == NULL)
4888 		return ENOBUFS;
4889 	dmamap = dma->dmamap;
4890
4891 	/*
4892 	 * Set up any necessary TSO state before we start packing...
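 * For TSO we locate the IP and TCP headers, derive the per-segment
 * MSS encoding for the descriptors, and (on pre-575X parts) seed the
 * TCP pseudo-header checksum that the firmware-assisted TSO expects.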
4893 */ 4894 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 4895 if (!use_tso) { 4896 maxsegsize = 0; 4897 } else { /* TSO setup */ 4898 unsigned mss; 4899 struct ether_header *eh; 4900 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 4901 struct mbuf * m0 = m_head; 4902 struct ip *ip; 4903 struct tcphdr *th; 4904 int iphl, hlen; 4905 4906 /* 4907 * XXX It would be nice if the mbuf pkthdr had offset 4908 * fields for the protocol headers. 4909 */ 4910 4911 eh = mtod(m0, struct ether_header *); 4912 switch (htons(eh->ether_type)) { 4913 case ETHERTYPE_IP: 4914 offset = ETHER_HDR_LEN; 4915 break; 4916 4917 case ETHERTYPE_VLAN: 4918 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 4919 break; 4920 4921 default: 4922 /* 4923 * Don't support this protocol or encapsulation. 4924 */ 4925 return ENOBUFS; 4926 } 4927 4928 /* 4929 * TCP/IP headers are in the first mbuf; we can do 4930 * this the easy way. 4931 */ 4932 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 4933 hlen = iphl + offset; 4934 if (__predict_false(m0->m_len < 4935 (hlen + sizeof(struct tcphdr)))) { 4936 4937 aprint_debug_dev(sc->bge_dev, 4938 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 4939 "not handled yet\n", 4940 m0->m_len, hlen+ sizeof(struct tcphdr)); 4941 #ifdef NOTYET 4942 /* 4943 * XXX jonathan@NetBSD.org: untested. 4944 * how to force this branch to be taken? 4945 */ 4946 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 4947 4948 m_copydata(m0, offset, sizeof(ip), &ip); 4949 m_copydata(m0, hlen, sizeof(th), &th); 4950 4951 ip.ip_len = 0; 4952 4953 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 4954 sizeof(ip.ip_len), &ip.ip_len); 4955 4956 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 4957 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 4958 4959 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 4960 sizeof(th.th_sum), &th.th_sum); 4961 4962 hlen += th.th_off << 2; 4963 iptcp_opt_words = hlen; 4964 #else 4965 /* 4966 * if_wm "hard" case not yet supported, can we not 4967 * mandate it out of existence? 4968 */ 4969 (void) ip; (void)th; (void) ip_tcp_hlen; 4970 4971 return ENOBUFS; 4972 #endif 4973 } else { 4974 ip = (struct ip *) (mtod(m0, char *) + offset); 4975 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 4976 ip_tcp_hlen = iphl + (th->th_off << 2); 4977 4978 /* Total IP/TCP options, in 32-bit words */ 4979 iptcp_opt_words = (ip_tcp_hlen 4980 - sizeof(struct tcphdr) 4981 - sizeof(struct ip)) >> 2; 4982 } 4983 if (BGE_IS_575X_PLUS(sc)) { 4984 th->th_sum = 0; 4985 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 4986 } else { 4987 /* 4988 * XXX jonathan@NetBSD.org: 5705 untested. 4989 * Requires TSO firmware patch for 5701/5703/5704. 4990 */ 4991 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 4992 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 4993 } 4994 4995 mss = m_head->m_pkthdr.segsz; 4996 txbd_tso_flags |= 4997 BGE_TXBDFLAG_CPU_PRE_DMA | 4998 BGE_TXBDFLAG_CPU_POST_DMA; 4999 5000 /* 5001 * Our NIC TSO-assist assumes TSO has standard, optionless 5002 * IPv4 and TCP headers, which total 40 bytes. By default, 5003 * the NIC copies 40 bytes of IP/TCP header from the 5004 * supplied header into the IP/TCP header portion of 5005 * each post-TSO-segment. If the supplied packet has IP or 5006 * TCP options, we need to tell the NIC to copy those extra 5007 * bytes into each post-TSO header, in addition to the normal 5008 * 40-byte IP/TCP header (and to leave space accordingly). 5009 * Unfortunately, the driver encoding of option length 5010 * varies across different ASIC families. 
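 * On 5705-and-newer parts the option-word count is folded into the
 * mss/segsize field (shifted left by 11); on older parts it is placed
 * in the upper bits of the descriptor flags word (shifted left by
 * 12), as done below.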
5011 */ 5012 tcp_seg_flags = 0; 5013 if (iptcp_opt_words) { 5014 if (BGE_IS_5705_PLUS(sc)) { 5015 tcp_seg_flags = 5016 iptcp_opt_words << 11; 5017 } else { 5018 txbd_tso_flags |= 5019 iptcp_opt_words << 12; 5020 } 5021 } 5022 maxsegsize = mss | tcp_seg_flags; 5023 ip->ip_len = htons(mss + ip_tcp_hlen); 5024 5025 } /* TSO setup */ 5026 5027 /* 5028 * Start packing the mbufs in this chain into 5029 * the fragment pointers. Stop when we run out 5030 * of fragments or hit the end of the mbuf chain. 5031 */ 5032 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 5033 BUS_DMA_NOWAIT); 5034 if (error) 5035 return ENOBUFS; 5036 /* 5037 * Sanity check: avoid coming within 16 descriptors 5038 * of the end of the ring. 5039 */ 5040 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 5041 BGE_TSO_PRINTF(("%s: " 5042 " dmamap_load_mbuf too close to ring wrap\n", 5043 device_xname(sc->bge_dev))); 5044 goto fail_unload; 5045 } 5046 5047 mtag = sc->ethercom.ec_nvlans ? 5048 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 5049 5050 5051 /* Iterate over dmap-map fragments. */ 5052 for (i = 0; i < dmamap->dm_nsegs; i++) { 5053 f = &sc->bge_rdata->bge_tx_ring[frag]; 5054 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 5055 break; 5056 5057 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); 5058 f->bge_len = dmamap->dm_segs[i].ds_len; 5059 5060 /* 5061 * For 5751 and follow-ons, for TSO we must turn 5062 * off checksum-assist flag in the tx-descr, and 5063 * supply the ASIC-revision-specific encoding 5064 * of TSO flags and segsize. 5065 */ 5066 if (use_tso) { 5067 if (BGE_IS_575X_PLUS(sc) || i == 0) { 5068 f->bge_rsvd = maxsegsize; 5069 f->bge_flags = csum_flags | txbd_tso_flags; 5070 } else { 5071 f->bge_rsvd = 0; 5072 f->bge_flags = 5073 (csum_flags | txbd_tso_flags) & 0x0fff; 5074 } 5075 } else { 5076 f->bge_rsvd = 0; 5077 f->bge_flags = csum_flags; 5078 } 5079 5080 if (mtag != NULL) { 5081 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 5082 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 5083 } else { 5084 f->bge_vlan_tag = 0; 5085 } 5086 cur = frag; 5087 BGE_INC(frag, BGE_TX_RING_CNT); 5088 } 5089 5090 if (i < dmamap->dm_nsegs) { 5091 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 5092 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 5093 goto fail_unload; 5094 } 5095 5096 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 5097 BUS_DMASYNC_PREWRITE); 5098 5099 if (frag == sc->bge_tx_saved_considx) { 5100 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 5101 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 5102 5103 goto fail_unload; 5104 } 5105 5106 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 5107 sc->bge_cdata.bge_tx_chain[cur] = m_head; 5108 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 5109 sc->txdma[cur] = dma; 5110 sc->bge_txcnt += dmamap->dm_nsegs; 5111 5112 *txidx = frag; 5113 5114 return 0; 5115 5116 fail_unload: 5117 bus_dmamap_unload(sc->bge_dmatag, dmamap); 5118 5119 return ENOBUFS; 5120 } 5121 5122 /* 5123 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 5124 * to the mbuf data regions directly in the transmit descriptors. 
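 * If bge_encap() cannot fit a packet (ring nearly full, or no free
 * DMA map), IFF_OACTIVE is set and the packet is left on the send
 * queue until bge_txeof() frees descriptors.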
5125 */ 5126 static void 5127 bge_start(struct ifnet *ifp) 5128 { 5129 struct bge_softc *sc; 5130 struct mbuf *m_head = NULL; 5131 uint32_t prodidx; 5132 int pkts = 0; 5133 5134 sc = ifp->if_softc; 5135 5136 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 5137 return; 5138 5139 prodidx = sc->bge_tx_prodidx; 5140 5141 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 5142 IFQ_POLL(&ifp->if_snd, m_head); 5143 if (m_head == NULL) 5144 break; 5145 5146 #if 0 5147 /* 5148 * XXX 5149 * safety overkill. If this is a fragmented packet chain 5150 * with delayed TCP/UDP checksums, then only encapsulate 5151 * it if we have enough descriptors to handle the entire 5152 * chain at once. 5153 * (paranoia -- may not actually be needed) 5154 */ 5155 if (m_head->m_flags & M_FIRSTFRAG && 5156 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 5157 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 5158 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 5159 ifp->if_flags |= IFF_OACTIVE; 5160 break; 5161 } 5162 } 5163 #endif 5164 5165 /* 5166 * Pack the data into the transmit ring. If we 5167 * don't have room, set the OACTIVE flag and wait 5168 * for the NIC to drain the ring. 5169 */ 5170 if (bge_encap(sc, m_head, &prodidx)) { 5171 ifp->if_flags |= IFF_OACTIVE; 5172 break; 5173 } 5174 5175 /* now we are committed to transmit the packet */ 5176 IFQ_DEQUEUE(&ifp->if_snd, m_head); 5177 pkts++; 5178 5179 /* 5180 * If there's a BPF listener, bounce a copy of this frame 5181 * to him. 5182 */ 5183 bpf_mtap(ifp, m_head); 5184 } 5185 if (pkts == 0) 5186 return; 5187 5188 /* Transmit */ 5189 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5190 /* 5700 b2 errata */ 5191 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 5192 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 5193 5194 sc->bge_tx_prodidx = prodidx; 5195 5196 /* 5197 * Set a timeout in case the chip goes out to lunch. 5198 */ 5199 ifp->if_timer = 5; 5200 } 5201 5202 static int 5203 bge_init(struct ifnet *ifp) 5204 { 5205 struct bge_softc *sc = ifp->if_softc; 5206 const uint16_t *m; 5207 uint32_t mode; 5208 int s, error = 0; 5209 5210 s = splnet(); 5211 5212 ifp = &sc->ethercom.ec_if; 5213 5214 /* Cancel pending I/O and flush buffers. */ 5215 bge_stop(ifp, 0); 5216 5217 bge_stop_fw(sc); 5218 bge_sig_pre_reset(sc, BGE_RESET_START); 5219 bge_reset(sc); 5220 bge_sig_legacy(sc, BGE_RESET_START); 5221 bge_sig_post_reset(sc, BGE_RESET_START); 5222 5223 bge_chipinit(sc); 5224 5225 /* 5226 * Init the various state machines, ring 5227 * control blocks and firmware. 5228 */ 5229 error = bge_blockinit(sc); 5230 if (error != 0) { 5231 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 5232 error); 5233 splx(s); 5234 return error; 5235 } 5236 5237 ifp = &sc->ethercom.ec_if; 5238 5239 /* 5718 step 25, 57XX step 54 */ 5240 /* Specify MTU. */ 5241 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 5242 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 5243 5244 /* 5718 step 23 */ 5245 /* Load our MAC address. */ 5246 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 5247 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 5248 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 5249 5250 /* Enable or disable promiscuous mode as needed. */ 5251 if (ifp->if_flags & IFF_PROMISC) 5252 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5253 else 5254 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5255 5256 /* Program multicast filter. */ 5257 bge_setmulti(sc); 5258 5259 /* Init RX ring. 
*/ 5260 bge_init_rx_ring_std(sc); 5261 5262 /* 5263 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 5264 * memory to insure that the chip has in fact read the first 5265 * entry of the ring. 5266 */ 5267 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 5268 uint32_t v, i; 5269 for (i = 0; i < 10; i++) { 5270 DELAY(20); 5271 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 5272 if (v == (MCLBYTES - ETHER_ALIGN)) 5273 break; 5274 } 5275 if (i == 10) 5276 aprint_error_dev(sc->bge_dev, 5277 "5705 A0 chip failed to load RX ring\n"); 5278 } 5279 5280 /* Init jumbo RX ring. */ 5281 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 5282 bge_init_rx_ring_jumbo(sc); 5283 5284 /* Init our RX return ring index */ 5285 sc->bge_rx_saved_considx = 0; 5286 5287 /* Init TX ring. */ 5288 bge_init_tx_ring(sc); 5289 5290 /* 5718 step 63, 57XX step 94 */ 5291 /* Enable TX MAC state machine lockup fix. */ 5292 mode = CSR_READ_4(sc, BGE_TX_MODE); 5293 if (BGE_IS_5755_PLUS(sc) || 5294 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 5295 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; 5296 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5720) { 5297 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 5298 mode |= CSR_READ_4(sc, BGE_TX_MODE) & 5299 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); 5300 } 5301 5302 /* Turn on transmitter */ 5303 CSR_WRITE_4_FLUSH(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); 5304 /* 5718 step 64 */ 5305 DELAY(100); 5306 5307 /* 5718 step 65, 57XX step 95 */ 5308 /* Turn on receiver */ 5309 mode = CSR_READ_4(sc, BGE_RX_MODE); 5310 if (BGE_IS_5755_PLUS(sc)) 5311 mode |= BGE_RXMODE_IPV6_ENABLE; 5312 CSR_WRITE_4_FLUSH(sc, BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); 5313 /* 5718 step 66 */ 5314 DELAY(10); 5315 5316 /* 5718 step 12 */ 5317 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 5318 5319 /* Tell firmware we're alive. */ 5320 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 5321 5322 /* Enable host interrupts. */ 5323 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 5324 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 5325 bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 0); 5326 5327 if ((error = bge_ifmedia_upd(ifp)) != 0) 5328 goto out; 5329 5330 ifp->if_flags |= IFF_RUNNING; 5331 ifp->if_flags &= ~IFF_OACTIVE; 5332 5333 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 5334 5335 out: 5336 sc->bge_if_flags = ifp->if_flags; 5337 splx(s); 5338 5339 return error; 5340 } 5341 5342 /* 5343 * Set media options. 5344 */ 5345 static int 5346 bge_ifmedia_upd(struct ifnet *ifp) 5347 { 5348 struct bge_softc *sc = ifp->if_softc; 5349 struct mii_data *mii = &sc->bge_mii; 5350 struct ifmedia *ifm = &sc->bge_ifmedia; 5351 int rc; 5352 5353 /* If this is a 1000baseX NIC, enable the TBI port. */ 5354 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 5355 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 5356 return EINVAL; 5357 switch (IFM_SUBTYPE(ifm->ifm_media)) { 5358 case IFM_AUTO: 5359 /* 5360 * The BCM5704 ASIC appears to have a special 5361 * mechanism for programming the autoneg 5362 * advertisement registers in TBI mode. 
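 * (Once BGE_SGDIG_STS reports autoneg done, the advertised pause
 * bits are written to BGE_SGDIG_CFG with the SEND bit pulsed, and the
 * register is then rewritten without SEND; see below.)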
5363 */ 5364 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 5365 uint32_t sgdig; 5366 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 5367 if (sgdig & BGE_SGDIGSTS_DONE) { 5368 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 5369 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 5370 sgdig |= BGE_SGDIGCFG_AUTO | 5371 BGE_SGDIGCFG_PAUSE_CAP | 5372 BGE_SGDIGCFG_ASYM_PAUSE; 5373 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, 5374 sgdig | BGE_SGDIGCFG_SEND); 5375 DELAY(5); 5376 CSR_WRITE_4_FLUSH(sc, BGE_SGDIG_CFG, 5377 sgdig); 5378 } 5379 } 5380 break; 5381 case IFM_1000_SX: 5382 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 5383 BGE_CLRBIT(sc, BGE_MAC_MODE, 5384 BGE_MACMODE_HALF_DUPLEX); 5385 } else { 5386 BGE_SETBIT(sc, BGE_MAC_MODE, 5387 BGE_MACMODE_HALF_DUPLEX); 5388 } 5389 DELAY(40); 5390 break; 5391 default: 5392 return EINVAL; 5393 } 5394 /* XXX 802.3x flow control for 1000BASE-SX */ 5395 return 0; 5396 } 5397 5398 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 5399 if ((rc = mii_mediachg(mii)) == ENXIO) 5400 return 0; 5401 5402 /* 5403 * Force an interrupt so that we will call bge_link_upd 5404 * if needed and clear any pending link state attention. 5405 * Without this we are not getting any further interrupts 5406 * for link state changes and thus will not UP the link and 5407 * not be able to send in bge_start. The only way to get 5408 * things working was to receive a packet and get a RX intr. 5409 */ 5410 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 5411 sc->bge_flags & BGE_IS_5788) 5412 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 5413 else 5414 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 5415 5416 return rc; 5417 } 5418 5419 /* 5420 * Report current media status. 5421 */ 5422 static void 5423 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 5424 { 5425 struct bge_softc *sc = ifp->if_softc; 5426 struct mii_data *mii = &sc->bge_mii; 5427 5428 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 5429 ifmr->ifm_status = IFM_AVALID; 5430 ifmr->ifm_active = IFM_ETHER; 5431 if (CSR_READ_4(sc, BGE_MAC_STS) & 5432 BGE_MACSTAT_TBI_PCS_SYNCHED) 5433 ifmr->ifm_status |= IFM_ACTIVE; 5434 ifmr->ifm_active |= IFM_1000_SX; 5435 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 5436 ifmr->ifm_active |= IFM_HDX; 5437 else 5438 ifmr->ifm_active |= IFM_FDX; 5439 return; 5440 } 5441 5442 mii_pollstat(mii); 5443 ifmr->ifm_status = mii->mii_media_status; 5444 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 5445 sc->bge_flowflags; 5446 } 5447 5448 static int 5449 bge_ifflags_cb(struct ethercom *ec) 5450 { 5451 struct ifnet *ifp = &ec->ec_if; 5452 struct bge_softc *sc = ifp->if_softc; 5453 int change = ifp->if_flags ^ sc->bge_if_flags; 5454 5455 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 5456 return ENETRESET; 5457 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 5458 return 0; 5459 5460 if ((ifp->if_flags & IFF_PROMISC) == 0) 5461 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5462 else 5463 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 5464 5465 bge_setmulti(sc); 5466 5467 sc->bge_if_flags = ifp->if_flags; 5468 return 0; 5469 } 5470 5471 static int 5472 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 5473 { 5474 struct bge_softc *sc = ifp->if_softc; 5475 struct ifreq *ifr = (struct ifreq *) data; 5476 int s, error = 0; 5477 struct mii_data *mii; 5478 5479 s = splnet(); 5480 5481 switch (command) { 5482 case SIOCSIFMEDIA: 5483 /* XXX Flow control is not supported for 1000BASE-SX */ 5484 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 5485 ifr->ifr_media &= 
static int
bge_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct mii_data *mii;

	s = splnet();

	switch (command) {
	case SIOCSIFMEDIA:
		/* XXX Flow control is not supported for 1000BASE-SX */
		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
			sc->bge_flowflags = 0;
		}

		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
			error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
			    command);
		} else {
			mii = &sc->bge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
			break;

		error = 0;

		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			bge_setmulti(sc);
		break;
	}

	splx(s);

	return error;
}

static void
bge_watchdog(struct ifnet *ifp)
{
	struct bge_softc *sc;

	sc = ifp->if_softc;

	aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");

	ifp->if_flags &= ~IFF_RUNNING;
	bge_init(ifp);

	ifp->if_oerrors++;
}

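/*
 * Clear the ENABLE bit of the given block mode register and poll,
 * for up to 100ms (1000 iterations of 100us), until the hardware
 * confirms that the block's state machine has actually stopped.
 */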
static void
bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
{
	int i;

	BGE_CLRBIT_FLUSH(sc, reg, bit);

	for (i = 0; i < 1000; i++) {
		delay(100);
		if ((CSR_READ_4(sc, reg) & bit) == 0)
			return;
	}

	/*
	 * Don't print an error when the register is BGE_SRS_MODE:
	 * that block is known to fail to stop in some environments
	 * (and once just after boot?).
	 */
	if (reg != BGE_SRS_MODE)
		aprint_error_dev(sc->bge_dev,
		    "block failed to stop: reg 0x%lx, bit 0x%08x\n",
		    (u_long)reg, bit);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct ifnet *ifp, int disable)
{
	struct bge_softc *sc = ifp->if_softc;

	callout_stop(&sc->bge_timeout);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx_flush(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Disable all of the receiver blocks.
	 */
	bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	BGE_CLRBIT_FLUSH(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB);
	delay(40);

	bge_stop_block(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	/* 5718 step 5a,5b */
	bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (BGE_IS_5700_FAMILY(sc))
		bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* 5718 step 5c,5d */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	if (BGE_IS_5700_FAMILY(sc)) {
		bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
	bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY.
	 */
	if (!(sc->bge_flags & BGE_PHY_FIBER_TBI))
		mii_down(&sc->bge_mii);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

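/*
 * Update the driver's notion of link state. Reached from the
 * interrupt handler when a link change attention is signalled;
 * the detection method differs per chip (MII interrupts on the
 * BCM5700, PCS sync for TBI fiber, MI auto-polling otherwise).
 */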
static void
bge_link_upd(struct bge_softc *sc)
{
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mii_data *mii = &sc->bge_mii;
	uint32_t status;
	int link;

	/* Clear 'pending link event' flag. */
	BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT);

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 */

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);

			/* Clear the interrupt. */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
			    BRGPHY_MII_IMR, BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!BGE_STS_BIT(sc, BGE_STS_LINK)) {
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
				if (BGE_ASICREV(sc->bge_chipid)
				    == BGE_ASICREV_BCM5704) {
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
					DELAY(40);
				}
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if_link_state_change(ifp, LINK_STATE_UP);
			}
		} else if (BGE_STS_BIT(sc, BGE_STS_LINK)) {
			BGE_STS_CLRBIT(sc, BGE_STS_LINK);
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	/*
	 * Discard link events for MII/GMII cards if MI auto-polling is
	 * disabled. This should not happen now that mii callouts are
	 * locked, but we keep the check for debugging.
	 */
	} else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) {
		/*
		 * Some broken BCM chips have the
		 * BGE_STATFLAG_LINKSTATE_CHANGED bit in the status word
		 * always set. Work around this bug by reading the PHY
		 * link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
		    BGE_STS_LINK : 0;

		if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) {
			mii_pollstat(mii);

			if (!BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
				BGE_STS_SETBIT(sc, BGE_STS_LINK);
			else if (BGE_STS_BIT(sc, BGE_STS_LINK) &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
				BGE_STS_CLRBIT(sc, BGE_STS_LINK);
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

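/*
 * Verify and apply a sysctl(3) value. Currently only the Rx
 * interrupt mitigation level (hw.bge.rx_lvl) is handled here: the
 * value must lie in [0, NBGE_RX_THRESH) and is propagated to all
 * bge instances via bge_update_all_threshes().
 */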
5806 */ 5807 static void 5808 bge_sysctl_init(struct bge_softc *sc) 5809 { 5810 int rc, bge_root_num; 5811 const struct sysctlnode *node; 5812 5813 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, NULL, 5814 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 5815 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 5816 goto out; 5817 } 5818 5819 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 5820 0, CTLTYPE_NODE, "bge", 5821 SYSCTL_DESCR("BGE interface controls"), 5822 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 5823 goto out; 5824 } 5825 5826 bge_root_num = node->sysctl_num; 5827 5828 /* BGE Rx interrupt mitigation level */ 5829 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 5830 CTLFLAG_READWRITE, 5831 CTLTYPE_INT, "rx_lvl", 5832 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 5833 bge_sysctl_verify, 0, 5834 &bge_rx_thresh_lvl, 5835 0, CTL_HW, bge_root_num, CTL_CREATE, 5836 CTL_EOL)) != 0) { 5837 goto out; 5838 } 5839 5840 bge_rxthresh_nodenum = node->sysctl_num; 5841 5842 return; 5843 5844 out: 5845 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 5846 } 5847 5848 #ifdef BGE_DEBUG 5849 void 5850 bge_debug_info(struct bge_softc *sc) 5851 { 5852 5853 printf("Hardware Flags:\n"); 5854 if (BGE_IS_57765_PLUS(sc)) 5855 printf(" - 57765 Plus\n"); 5856 if (BGE_IS_5717_PLUS(sc)) 5857 printf(" - 5717 Plus\n"); 5858 if (BGE_IS_5755_PLUS(sc)) 5859 printf(" - 5755 Plus\n"); 5860 if (BGE_IS_575X_PLUS(sc)) 5861 printf(" - 575X Plus\n"); 5862 if (BGE_IS_5705_PLUS(sc)) 5863 printf(" - 5705 Plus\n"); 5864 if (BGE_IS_5714_FAMILY(sc)) 5865 printf(" - 5714 Family\n"); 5866 if (BGE_IS_5700_FAMILY(sc)) 5867 printf(" - 5700 Family\n"); 5868 if (sc->bge_flags & BGE_IS_5788) 5869 printf(" - 5788\n"); 5870 if (sc->bge_flags & BGE_JUMBO_CAPABLE) 5871 printf(" - Supports Jumbo Frames\n"); 5872 if (sc->bge_flags & BGE_NO_EEPROM) 5873 printf(" - No EEPROM\n"); 5874 if (sc->bge_flags & BGE_PCIX) 5875 printf(" - PCI-X Bus\n"); 5876 if (sc->bge_flags & BGE_PCIE) 5877 printf(" - PCI Express Bus\n"); 5878 if (sc->bge_flags & BGE_RX_ALIGNBUG) 5879 printf(" - RX Alignment Bug\n"); 5880 if (sc->bge_flags & BGE_APE) 5881 printf(" - APE\n"); 5882 if (sc->bge_flags & BGE_CPMU_PRESENT) 5883 printf(" - CPMU\n"); 5884 if (sc->bge_flags & BGE_TSO) 5885 printf(" - TSO\n"); 5886 5887 if (sc->bge_flags & BGE_PHY_NO_3LED) 5888 printf(" - No 3 LEDs\n"); 5889 if (sc->bge_flags & BGE_PHY_CRC_BUG) 5890 printf(" - CRC bug\n"); 5891 if (sc->bge_flags & BGE_PHY_ADC_BUG) 5892 printf(" - ADC bug\n"); 5893 if (sc->bge_flags & BGE_PHY_5704_A0_BUG) 5894 printf(" - 5704 A0 bug\n"); 5895 if (sc->bge_flags & BGE_PHY_JITTER_BUG) 5896 printf(" - jitter bug\n"); 5897 if (sc->bge_flags & BGE_PHY_BER_BUG) 5898 printf(" - BER bug\n"); 5899 if (sc->bge_flags & BGE_PHY_ADJUST_TRIM) 5900 printf(" - adjust trim\n"); 5901 if (sc->bge_flags & BGE_PHY_NO_WIRESPEED) 5902 printf(" - no wirespeed\n"); 5903 } 5904 #endif /* BGE_DEBUG */ 5905 5906 static int 5907 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 5908 { 5909 prop_dictionary_t dict; 5910 prop_data_t ea; 5911 5912 if ((sc->bge_flags & BGE_NO_EEPROM) == 0) 5913 return 1; 5914 5915 dict = device_properties(sc->bge_dev); 5916 ea = prop_dictionary_get(dict, "mac-address"); 5917 if (ea != NULL) { 5918 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 5919 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 5920 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 5921 return 0; 5922 } 5923 5924 return 1; 5925 } 5926 5927 static int 5928 
static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
{
	prop_dictionary_t dict;
	prop_data_t ea;

	if ((sc->bge_flags & BGE_NO_EEPROM) == 0)
		return 1;

	dict = device_properties(sc->bge_dev);
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
		return 0;
	}

	return 1;
}

static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;

	mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		return 0;
	}
	return 1;
}

static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
	    ETHER_ADDR_LEN));
}

static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		return 1;

	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN));
}

static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_fw,
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}