/*	$NetBSD: if_bge.c,v 1.198 2011/06/09 12:04:29 cegger Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.198 2011/06/09 12:04:29 cegger Exp $");

#include "vlan.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NRND > 0
#include <sys/rnd.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <net/bpf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
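 *
 * As a rough back-of-the-envelope illustration (not a measured figure):
 * at full GigE line rate with 1500-byte frames, roughly 81k packets/s
 * arrive, so coalescing 46 frames per interrupt corresponds to on the
 * order of 1800 interrupts/s, versus one interrupt per packet with no
 * mitigation at all.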
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 32,  2 },
	{ 50,  4 },
	{ 100, 8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static void bge_release_resources(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
static void bge_stop(struct ifnet *, int);
static void bge_watchdog(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, void *, int, int);
static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, void *, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
	       bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_setpowerstate(struct bge_softc *, int);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static void bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);

#define BGE_RESET_START 1
#define BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);
static void sysctl_bge_init(struct bge_softc *);
static int sysctl_bge_verify(SYSCTLFN_PROTO);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x ;} while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM.  We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5724,
	  "Broadcom BCM5724 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M,
	  "Broadcom BCM5750M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M,
	  "Broadcom BCM5906M Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57760,
	  "Broadcom BCM57760 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57761,
	  "Broadcom BCM57761 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57765,
	  "Broadcom BCM57765 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57780,
	  "Broadcom BCM57780 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57781,
	  "Broadcom BCM57781 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57785,
	  "Broadcom BCM57785 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57788,
	  "Broadcom BCM57788 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57790,
	  "Broadcom BCM57790 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57791,
	  "Broadcom BCM57791 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM57795,
	  "Broadcom BCM57795 Fast Ethernet" },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
	  "SysKonnect SK-9Dx1 Gigabit Ethernet" },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996,
	  "3Com 3c996 Gigabit Ethernet" },
	{ PCI_VENDOR_FUJITSU4, PCI_PRODUCT_FUJITSU4_PW008GE4,
	  "Fujitsu PW008GE4 Gigabit Ethernet" },
	{ PCI_VENDOR_FUJITSU4, PCI_PRODUCT_FUJITSU4_PW008GE5,
	  "Fujitsu PW008GE5 Gigabit Ethernet" },
	{ PCI_VENDOR_FUJITSU4, PCI_PRODUCT_FUJITSU4_PP250_450_LAN,
	  "Fujitsu Primepower 250/450 Gigabit Ethernet" },
	{ 0, 0, NULL },
};

/*
 * XXX: how to handle variants based on 5750 and derivatives:
 * 5750, 5751, 5721, possibly 5714, 5752, and 5708?, which
 * in general behave like a 5705, except with additional quirks.
 * This driver's current handling of the 5721 is wrong;
 * how we map ASIC revision to "quirks" needs more thought.
 * (defined here until the thought is done).
 */
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5750_OR_BEYOND(sc)	((sc)->bge_flags & BGE_5750_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)

static const struct bge_revision {
	uint32_t		br_chipid;
	const char		*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
}

/*
 * PCI Express only
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR);
	if ((val & PCI_PCIE_DCSR_MAX_READ_REQ) !=
	    BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
		aprint_verbose_dev(sc->bge_dev,
		    "adjust device control 0x%04x ", val);
		val &= ~PCI_PCIE_DCSR_MAX_READ_REQ;
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
		    + PCI_PCIE_DCSR, val);
		aprint_verbose("-> 0x%04x\n", val);
	}
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

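	/*
	 * Acquire the NVRAM software-arbitration (SWARB) semaphore:
	 * assert the request bit and poll for the grant for up to
	 * 8000 * 20 us (roughly 160 ms) before enabling access.
	 */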
	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.
	 * On some chips, the PHY responds to accesses at all addresses,
	 * which could cause us to bogusly attach the PHY 32 times at
	 * probe time.  Always restricting the lookup to address 1 is
	 * simpler than trying to figure out which chip revisions should
	 * be special-cased.
	 */
	if (phy != 1)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (phy != 1) {
		return;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) {
		return;
	}

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(40);
	}

	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);

	return;
}


/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
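	/*
	 * The sizes used below come from the bge headers: the pool is
	 * BGE_JMEM bytes, later carved into BGE_JSLOTS buffers of
	 * BGE_JLEN bytes each.
	 */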
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* Calculate the slot this buffer belongs to. */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
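		/*
		 * MEXTADD() below attaches the jumbo slot as external
		 * storage and registers bge_jfree() as its free routine,
		 * so the slot returns to bge_jfree_listhead when the
		 * mbuf is released.
		 */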
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it.  At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot.  For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return ENOBUFS;
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "can't alloc txdmamap_pool_entry\n");
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return ENOMEM;
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return 0;
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	uint32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
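		/*
		 * Worked example (illustrative only): a CRC whose low
		 * 7 bits are 0x5a selects word (0x5a & 0x60) >> 5 = 2
		 * and bit 0x5a & 0x1f = 26, i.e. bit 26 of hashes[2].
		 */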
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

 setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

static int
bge_poll_fw(struct bge_softc *sc)
{
	uint32_t val;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
			return -1;
		}
	} else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) {
		/*
		 * Poll the value location we just wrote until
		 * we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			return -1;
		}
	}

	return 0;
}

/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_PCIX) {
		DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= (0x02 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x02 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |=
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= (0x03 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x03 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
	    dma_rw_ctl);

	/*
	 * Set up general mode register.
 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * The BCM5701 B5 has a bug causing data corruption when using
	 * 64-bit DMA reads, which can be terminated early and then
	 * completed later as 32-bit accesses, in combination with
	 * certain bridges.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);

	/*
	 * Tell the firmware the driver is running
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
	    PCI_COMMAND_INVALIDATE_ENABLE);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		DELAY(40);	/* XXX */

		/* Put PHY into ready state */
		BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
		CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
		DELAY(40);
	}

	return 0;
}

static int
bge_blockinit(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	bus_size_t rcb_addr;
	int i;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bge_hostaddr taddr;
	uint32_t val;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
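	 *
	 * For illustration only (a sketch, not part of the init sequence):
	 * once BGE_PCI_MEMWIN_BASEADDR has been set to 0, a word of
	 * internal NIC memory at offset `off' can be reached either
	 * indirectly through PCI config space or through the 32K window
	 * in BAR 0, roughly:
	 *
	 *	val = bge_readmem_ind(sc, off);
	 *	val = CSR_READ_4(sc, BGE_MEMWIN_START + off);
	 *
	 * The RCB_WRITE_4() accesses further down use the window-based
	 * form, with rcb_addr offsets relative to BGE_MEMWIN_START.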
1946 */ 1947 1948 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 1949 1950 /* Step 33: Configure mbuf memory pool */ 1951 if (BGE_IS_5700_FAMILY(sc)) { 1952 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1953 BGE_BUFFPOOL_1); 1954 1955 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1956 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1957 else 1958 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1959 1960 /* Configure DMA resource pool */ 1961 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1962 BGE_DMA_DESCRIPTORS); 1963 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1964 } 1965 1966 /* Step 35: Configure mbuf pool watermarks */ 1967 #ifdef ORIG_WPAUL_VALUES 1968 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1970 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1971 #else 1972 1973 /* new broadcom docs strongly recommend these: */ 1974 if (!BGE_IS_5705_PLUS(sc)) { 1975 if (ifp->if_mtu > ETHER_MAX_LEN) { 1976 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1977 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1978 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1979 } else { 1980 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1981 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1982 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1983 } 1984 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1985 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1986 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1987 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1988 } else { 1989 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1990 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1991 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1992 } 1993 #endif 1994 1995 /* Step 36: Configure DMA resource watermarks */ 1996 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1997 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1998 1999 /* Step 38: Enable buffer manager */ 2000 CSR_WRITE_4(sc, BGE_BMAN_MODE, 2001 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 2002 2003 /* Step 39: Poll for buffer manager start indication */ 2004 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2005 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2006 break; 2007 DELAY(10); 2008 } 2009 2010 if (i == BGE_TIMEOUT * 2) { 2011 aprint_error_dev(sc->bge_dev, 2012 "buffer manager failed to start\n"); 2013 return ENXIO; 2014 } 2015 2016 /* Step 40: Enable flow-through queues */ 2017 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2018 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2019 2020 /* Wait until queue initialization is complete */ 2021 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2022 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2023 break; 2024 DELAY(10); 2025 } 2026 2027 if (i == BGE_TIMEOUT * 2) { 2028 aprint_error_dev(sc->bge_dev, 2029 "flow-through queue init failed\n"); 2030 return ENXIO; 2031 } 2032 2033 /* Step 41: Initialize the standard RX ring control block */ 2034 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2035 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2036 if (BGE_IS_5705_PLUS(sc)) 2037 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2038 else 2039 rcb->bge_maxlen_flags = 2040 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2041 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2042 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2043 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2044 CSR_WRITE_4(sc, 
BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2045 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2046 2047 /* 2048 * Step 42: Initialize the jumbo RX ring control block 2049 * We set the 'ring disabled' bit in the flags 2050 * field until we're actually ready to start 2051 * using this ring (i.e. once we set the MTU 2052 * high enough to require it). 2053 */ 2054 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2055 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2056 BGE_HOSTADDR(rcb->bge_hostaddr, 2057 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2058 rcb->bge_maxlen_flags = 2059 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 2060 BGE_RCB_FLAG_RING_DISABLED); 2061 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2062 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2063 rcb->bge_hostaddr.bge_addr_hi); 2064 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2065 rcb->bge_hostaddr.bge_addr_lo); 2066 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2067 rcb->bge_maxlen_flags); 2068 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2069 2070 /* Set up dummy disabled mini ring RCB */ 2071 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2072 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2073 BGE_RCB_FLAG_RING_DISABLED); 2074 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2075 rcb->bge_maxlen_flags); 2076 2077 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2078 offsetof(struct bge_ring_data, bge_info), 2079 sizeof (struct bge_gib), 2080 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2081 } 2082 2083 /* 2084 * Set the BD ring replenish thresholds. The recommended 2085 * values are 1/8th the number of descriptors allocated to 2086 * each ring. 2087 */ 2088 i = BGE_STD_RX_RING_CNT / 8; 2089 2090 /* 2091 * Use a value of 8 for the following chips to workaround HW errata. 2092 * Some of these chips have been added based on empirical 2093 * evidence (they don't work unless this is done). 2094 */ 2095 if (BGE_IS_5705_PLUS(sc)) 2096 i = 8; 2097 2098 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i); 2099 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8); 2100 2101 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2102 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765) { 2103 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2104 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2105 } 2106 2107 /* 2108 * Disable all unused send rings by setting the 'ring disabled' 2109 * bit in the flags field of all the TX send ring control blocks. 2110 * These are located in NIC memory. 
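	 *
	 * For reference, each RCB in NIC memory has roughly this layout,
	 * matching the fields poked by RCB_WRITE_4() below (see the bge
	 * headers for the authoritative declaration):
	 *
	 *	struct bge_rcb {
	 *		bge_hostaddr	bge_hostaddr;	   host ring base
	 *		uint32_t	bge_maxlen_flags;  max BD len / flags
	 *		uint32_t	bge_nicaddr;	   ring addr in NIC mem
	 *	};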
2111 */ 2112 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2113 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 2114 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2115 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2116 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2117 rcb_addr += sizeof(struct bge_rcb); 2118 } 2119 2120 /* Configure TX RCB 0 (we use only the first ring) */ 2121 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2122 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2123 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2124 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2125 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2126 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2127 if (BGE_IS_5700_FAMILY(sc)) 2128 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2129 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2130 2131 /* Disable all unused RX return rings */ 2132 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2133 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 2134 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2135 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2136 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2137 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2138 BGE_RCB_FLAG_RING_DISABLED)); 2139 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2140 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2141 (i * (sizeof(uint64_t))), 0); 2142 rcb_addr += sizeof(struct bge_rcb); 2143 } 2144 2145 /* Initialize RX ring indexes */ 2146 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2147 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2148 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2149 2150 /* 2151 * Set up RX return ring 0 2152 * Note that the NIC address for RX return rings is 0x00000000. 2153 * The return rings live entirely within the host, so the 2154 * nicaddr field in the RCB isn't used. 2155 */ 2156 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2157 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2158 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2159 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2160 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2161 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2162 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2163 2164 /* Set random backoff seed for TX */ 2165 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2166 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 2167 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 2168 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] + 2169 BGE_TX_BACKOFF_SEED_MASK); 2170 2171 /* Set inter-packet gap */ 2172 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 2173 2174 /* 2175 * Specify which ring to use for packets that don't match 2176 * any RX rules. 2177 */ 2178 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2179 2180 /* 2181 * Configure number of RX lists. One interrupt distribution 2182 * list, sixteen active lists, one bad frames class. 2183 */ 2184 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2185 2186 /* Inialize RX list placement stats mask. */ 2187 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2188 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2189 2190 /* Disable host coalescing until we get it set up */ 2191 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2192 2193 /* Poll to make sure it's shut down. 
*/ 2194 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2195 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2196 break; 2197 DELAY(10); 2198 } 2199 2200 if (i == BGE_TIMEOUT * 2) { 2201 aprint_error_dev(sc->bge_dev, 2202 "host coalescing engine failed to idle\n"); 2203 return ENXIO; 2204 } 2205 2206 /* Set up host coalescing defaults */ 2207 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2208 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2209 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2210 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2211 if (BGE_IS_5700_FAMILY(sc)) { 2212 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2213 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2214 } 2215 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2216 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2217 2218 /* Set up address of statistics block */ 2219 if (BGE_IS_5700_FAMILY(sc)) { 2220 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2221 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2222 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2223 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2224 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2225 } 2226 2227 /* Set up address of status block */ 2228 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2229 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2230 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2231 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2232 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2233 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2234 2235 /* Turn on host coalescing state machine */ 2236 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2237 2238 /* Turn on RX BD completion state machine and enable attentions */ 2239 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2240 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2241 2242 /* Turn on RX list placement state machine */ 2243 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2244 2245 /* Turn on RX list selector state machine. */ 2246 if (BGE_IS_5700_FAMILY(sc)) 2247 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2248 2249 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2250 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2251 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2252 BGE_MACMODE_FRMHDR_DMA_ENB; 2253 2254 if (sc->bge_flags & BGE_PHY_FIBER_TBI) 2255 val |= BGE_PORTMODE_TBI; 2256 else if (sc->bge_flags & BGE_PHY_FIBER_MII) 2257 val |= BGE_PORTMODE_GMII; 2258 else 2259 val |= BGE_PORTMODE_MII; 2260 2261 /* Turn on DMA, clear stats */ 2262 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2263 2264 /* Set misc. 
local control, enable interrupts on attentions */ 2265 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 2266 2267 #ifdef notdef 2268 /* Assert GPIO pins for PHY reset */ 2269 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 2270 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 2271 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 2272 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 2273 #endif 2274 2275 #if defined(not_quite_yet) 2276 /* Linux driver enables enable gpio pin #1 on 5700s */ 2277 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 2278 sc->bge_local_ctrl_reg |= 2279 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 2280 } 2281 #endif 2282 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2283 2284 /* Turn on DMA completion state machine */ 2285 if (BGE_IS_5700_FAMILY(sc)) 2286 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2287 2288 /* Turn on write DMA state machine */ 2289 { 2290 uint32_t bge_wdma_mode = 2291 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 2292 2293 /* Enable host coalescing bug fix; see Linux tg3.c */ 2294 if (BGE_IS_5755_PLUS(sc)) 2295 bge_wdma_mode |= BGE_WDMAMODE_STATUS_TAG_FIX; 2296 2297 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode); 2298 } 2299 2300 /* Turn on read DMA state machine */ 2301 { 2302 uint32_t dma_read_modebits; 2303 2304 dma_read_modebits = 2305 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2306 2307 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2308 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2309 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2310 dma_read_modebits |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2311 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2312 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2313 2314 if (sc->bge_flags & BGE_PCIE) 2315 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST; 2316 if (sc->bge_flags & BGE_TSO) 2317 dma_read_modebits |= BGE_RDMAMODE_TSO4_ENABLE; 2318 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits); 2319 delay(40); 2320 } 2321 2322 /* Turn on RX data completion state machine */ 2323 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2324 2325 /* Turn on RX BD initiator state machine */ 2326 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2327 2328 /* Turn on RX data and RX BD initiator state machine */ 2329 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2330 2331 /* Turn on Mbuf cluster free state machine */ 2332 if (BGE_IS_5700_FAMILY(sc)) 2333 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2334 2335 /* Turn on send BD completion state machine */ 2336 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2337 2338 /* Turn on send data completion state machine */ 2339 val = BGE_SDCMODE_ENABLE; 2340 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 2341 val |= BGE_SDCMODE_CDELAY; 2342 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2343 2344 /* Turn on send data initiator state machine */ 2345 if (sc->bge_flags & BGE_TSO) { 2346 /* XXX: magic value from Linux driver */ 2347 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 2348 } else 2349 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2350 2351 /* Turn on send BD initiator state machine */ 2352 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2353 2354 /* Turn on send BD selector state machine */ 2355 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2356 2357 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 2358 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2359 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 2360 2361 /* ack/clear link change events */ 2362 CSR_WRITE_4(sc, BGE_MAC_STS, 
BGE_MACSTAT_SYNC_CHANGED | 2363 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2364 BGE_MACSTAT_LINK_CHANGED); 2365 CSR_WRITE_4(sc, BGE_MI_STS, 0); 2366 2367 /* Enable PHY auto polling (for MII/GMII only) */ 2368 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 2369 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2370 } else { 2371 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 2372 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 2373 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 2374 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2375 BGE_EVTENB_MI_INTERRUPT); 2376 } 2377 2378 /* 2379 * Clear any pending link state attention. 2380 * Otherwise some link state change events may be lost until attention 2381 * is cleared by bge_intr() -> bge_link_upd() sequence. 2382 * It's not necessary on newer BCM chips - perhaps enabling link 2383 * state change attentions implies clearing pending attention. 2384 */ 2385 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2386 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2387 BGE_MACSTAT_LINK_CHANGED); 2388 2389 /* Enable link state change attentions. */ 2390 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 2391 2392 return 0; 2393 } 2394 2395 static const struct bge_revision * 2396 bge_lookup_rev(uint32_t chipid) 2397 { 2398 const struct bge_revision *br; 2399 2400 for (br = bge_revisions; br->br_name != NULL; br++) { 2401 if (br->br_chipid == chipid) 2402 return br; 2403 } 2404 2405 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2406 if (br->br_chipid == BGE_ASICREV(chipid)) 2407 return br; 2408 } 2409 2410 return NULL; 2411 } 2412 2413 static const struct bge_product * 2414 bge_lookup(const struct pci_attach_args *pa) 2415 { 2416 const struct bge_product *bp; 2417 2418 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2419 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2420 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2421 return bp; 2422 } 2423 2424 return NULL; 2425 } 2426 2427 static int 2428 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2429 { 2430 #ifdef NOTYET 2431 uint32_t pm_ctl = 0; 2432 2433 /* XXX FIXME: make sure indirect accesses enabled? */ 2434 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2435 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2436 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2437 2438 /* clear the PME_assert bit and power state bits, enable PME */ 2439 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2440 pm_ctl &= ~PCIM_PSTAT_DMASK; 2441 pm_ctl |= (1 << 8); 2442 2443 if (powerlevel == 0) { 2444 pm_ctl |= PCIM_PSTAT_D0; 2445 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2446 pm_ctl, 2); 2447 DELAY(10000); 2448 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2449 DELAY(10000); 2450 2451 #ifdef NOTYET 2452 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2453 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2454 #endif 2455 DELAY(40); DELAY(40); DELAY(40); 2456 DELAY(10000); /* above not quite adequate on 5700 */ 2457 return 0; 2458 } 2459 2460 2461 /* 2462 * Entering ACPI power states D1-D3 is achieved by wiggling 2463 * GMII gpio pins. Example code assumes all hardware vendors 2464 * followed Broadcom's sample pcb layout. Until we verify that 2465 * for all supported OEM cards, states D1-D3 are unsupported. 2466 */ 2467 aprint_error_dev(sc->bge_dev, 2468 "power state %d unimplemented; check GPIO pins\n", 2469 powerlevel); 2470 #endif 2471 return EOPNOTSUPP; 2472 } 2473 2474 2475 /* 2476 * Probe for a Broadcom chip. 
Check the PCI vendor and device IDs 2477 * against our list and return its name if we find a match. Note 2478 * that since the Broadcom controller contains VPD support, we 2479 * can get the device name string from the controller itself instead 2480 * of the compiled-in string. This is a little slow, but it guarantees 2481 * we'll always announce the right product name. 2482 */ 2483 static int 2484 bge_probe(device_t parent, cfdata_t match, void *aux) 2485 { 2486 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2487 2488 if (bge_lookup(pa) != NULL) 2489 return 1; 2490 2491 return 0; 2492 } 2493 2494 static void 2495 bge_attach(device_t parent, device_t self, void *aux) 2496 { 2497 struct bge_softc *sc = device_private(self); 2498 struct pci_attach_args *pa = aux; 2499 prop_dictionary_t dict; 2500 const struct bge_product *bp; 2501 const struct bge_revision *br; 2502 pci_chipset_tag_t pc; 2503 pci_intr_handle_t ih; 2504 const char *intrstr = NULL; 2505 bus_dma_segment_t seg; 2506 int rseg; 2507 uint32_t hwcfg = 0; 2508 uint32_t command; 2509 struct ifnet *ifp; 2510 uint32_t misccfg; 2511 void * kva; 2512 u_char eaddr[ETHER_ADDR_LEN]; 2513 pcireg_t memtype, subid; 2514 bus_addr_t memaddr; 2515 bus_size_t memsize; 2516 uint32_t pm_ctl; 2517 bool no_seeprom; 2518 2519 bp = bge_lookup(pa); 2520 KASSERT(bp != NULL); 2521 2522 sc->sc_pc = pa->pa_pc; 2523 sc->sc_pcitag = pa->pa_tag; 2524 sc->bge_dev = self; 2525 2526 pc = sc->sc_pc; 2527 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 2528 2529 aprint_naive(": Ethernet controller\n"); 2530 aprint_normal(": %s\n", bp->bp_name); 2531 2532 /* 2533 * Map control/status registers. 2534 */ 2535 DPRINTFN(5, ("Map control/status regs\n")); 2536 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2537 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2538 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 2539 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2540 2541 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2542 aprint_error_dev(sc->bge_dev, 2543 "failed to enable memory mapping!\n"); 2544 return; 2545 } 2546 2547 DPRINTFN(5, ("pci_mem_find\n")); 2548 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 2549 switch (memtype) { 2550 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2551 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2552 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2553 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2554 &memaddr, &memsize) == 0) 2555 break; 2556 default: 2557 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2558 return; 2559 } 2560 2561 DPRINTFN(5, ("pci_intr_map\n")); 2562 if (pci_intr_map(pa, &ih)) { 2563 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2564 return; 2565 } 2566 2567 DPRINTFN(5, ("pci_intr_string\n")); 2568 intrstr = pci_intr_string(pc, ih); 2569 2570 DPRINTFN(5, ("pci_intr_establish\n")); 2571 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2572 2573 if (sc->bge_intrhand == NULL) { 2574 aprint_error_dev(sc->bge_dev, 2575 "couldn't establish interrupt%s%s\n", 2576 intrstr ? " at " : "", intrstr ? intrstr : ""); 2577 return; 2578 } 2579 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2580 2581 /* 2582 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2583 * can clobber the chip's PCI config-space power control registers, 2584 * leaving the card in D3 powersave state. 
2585 * We do not have memory-mapped registers in this state, 2586 * so force device into D0 state before starting initialization. 2587 */ 2588 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 2589 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2590 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2591 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2592 DELAY(1000); /* 27 usec is allegedly sufficent */ 2593 2594 /* 2595 * Save ASIC rev. 2596 */ 2597 sc->bge_chipid = 2598 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) 2599 >> BGE_PCIMISCCTL_ASICREV_SHIFT; 2600 2601 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) { 2602 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717 || 2603 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718 || 2604 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5724) 2605 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2606 BGE_PCI_GEN2_PRODID_ASICREV); 2607 else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57761 || 2608 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57765 || 2609 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57781 || 2610 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57785 || 2611 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 2612 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795) 2613 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2614 BGE_PCI_GEN15_PRODID_ASICREV); 2615 else 2616 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2617 BGE_PCI_PRODID_ASICREV); 2618 } 2619 2620 if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 2621 &sc->bge_pciecap, NULL) != 0) 2622 || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) { 2623 /* PCIe */ 2624 sc->bge_flags |= BGE_PCIE; 2625 bge_set_max_readrq(sc); 2626 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 2627 BGE_PCISTATE_PCI_BUSMODE) == 0) { 2628 /* PCI-X */ 2629 sc->bge_flags |= BGE_PCIX; 2630 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, 2631 &sc->bge_pcixcap, NULL) == 0) 2632 aprint_error_dev(sc->bge_dev, 2633 "unable to find PCIX capability\n"); 2634 } 2635 2636 /* chipid */ 2637 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2638 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 || 2639 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2640 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2641 sc->bge_flags |= BGE_5700_FAMILY; 2642 2643 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || 2644 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || 2645 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714) 2646 sc->bge_flags |= BGE_5714_FAMILY; 2647 2648 /* Intentionally exclude BGE_ASICREV_BCM5906 */ 2649 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2650 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2651 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2652 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2653 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2654 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || 2655 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 2656 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2657 sc->bge_flags |= BGE_5755_PLUS; 2658 2659 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || 2660 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 2661 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 || 2662 BGE_IS_5755_PLUS(sc) || 2663 BGE_IS_5714_FAMILY(sc)) 2664 sc->bge_flags |= BGE_5750_PLUS; 2665 2666 if 
(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
	    BGE_IS_5750_OR_BEYOND(sc))
		sc->bge_flags |= BGE_5705_PLUS;

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads.  On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
	    sc->bge_flags & BGE_PCIX)
		sc->bge_flags |= BGE_RX_ALIGNBUG;

	if (BGE_IS_5700_FAMILY(sc))
		sc->bge_flags |= BGE_JUMBO_CAPABLE;

	if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	     BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
	    PCI_VENDOR(subid) == PCI_VENDOR_DELL)
		sc->bge_flags |= BGE_NO_3LED;

	misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
	misccfg &= BGE_MISCCFG_BOARD_ID_MASK;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
	    (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
	     misccfg == BGE_MISCCFG_BOARD_ID_5788M))
		sc->bge_flags |= BGE_IS_5788;

	/*
	 * Some controllers seem to require a special firmware to use
	 * TSO.  But the firmware is not available to FreeBSD, and Linux
	 * claims that the TSO performed by the firmware is slower than
	 * hardware-based TSO.  Moreover, the firmware-based TSO has one
	 * known bug: it can't handle TSO if the ethernet + IP/TCP header
	 * is greater than 80 bytes.  A workaround for the TSO bug exists,
	 * but it appears to cost more than simply not using TSO at all.
	 * Some hardware also has the TSO bug, so limit TSO to the
	 * controllers that are not affected by it (e.g. 5755 or higher).
	 */
	if (BGE_IS_5755_PLUS(sc)) {
		/*
		 * BCM5754 and BCM5787 share the same ASIC id, so an
		 * explicit device id check is required.
2714 */ 2715 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 2716 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 2717 sc->bge_flags |= BGE_TSO; 2718 } 2719 2720 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 2721 (misccfg == 0x4000 || misccfg == 0x8000)) || 2722 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2723 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2724 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 2725 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2726 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2727 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2728 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 2729 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 2730 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2731 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 2732 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2733 sc->bge_flags |= BGE_10_100_ONLY; 2734 2735 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2736 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2737 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2738 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2739 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2740 sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED; 2741 2742 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2743 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2744 sc->bge_flags |= BGE_PHY_CRC_BUG; 2745 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 2746 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 2747 sc->bge_flags |= BGE_PHY_ADC_BUG; 2748 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2749 sc->bge_flags |= BGE_PHY_5704_A0_BUG; 2750 2751 if (BGE_IS_5705_PLUS(sc) && 2752 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 2753 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2754 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 2755 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 && 2756 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) { 2757 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2758 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2759 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2760 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 2761 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 2762 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 2763 sc->bge_flags |= BGE_PHY_JITTER_BUG; 2764 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 2765 sc->bge_flags |= BGE_PHY_ADJUST_TRIM; 2766 } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 2767 sc->bge_flags |= BGE_PHY_BER_BUG; 2768 } 2769 2770 /* 2771 * SEEPROM check. 2772 * First check if firmware knows we do not have SEEPROM. 2773 */ 2774 if (prop_dictionary_get_bool(device_properties(self), 2775 "without-seeprom", &no_seeprom) && no_seeprom) 2776 sc->bge_flags |= BGE_NO_EEPROM; 2777 2778 /* Now check the 'ROM failed' bit on the RX CPU */ 2779 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 2780 sc->bge_flags |= BGE_NO_EEPROM; 2781 2782 /* Try to reset the chip. 
*/ 2783 DPRINTFN(5, ("bge_reset\n")); 2784 bge_reset(sc); 2785 2786 sc->bge_asf_mode = 0; 2787 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2788 == BGE_MAGIC_NUMBER)) { 2789 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 2790 & BGE_HWCFG_ASF) { 2791 sc->bge_asf_mode |= ASF_ENABLE; 2792 sc->bge_asf_mode |= ASF_STACKUP; 2793 if (BGE_IS_5750_OR_BEYOND(sc)) { 2794 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2795 } 2796 } 2797 } 2798 2799 /* Try to reset the chip again the nice way. */ 2800 bge_stop_fw(sc); 2801 bge_sig_pre_reset(sc, BGE_RESET_STOP); 2802 if (bge_reset(sc)) 2803 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 2804 2805 bge_sig_legacy(sc, BGE_RESET_STOP); 2806 bge_sig_post_reset(sc, BGE_RESET_STOP); 2807 2808 if (bge_chipinit(sc)) { 2809 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 2810 bge_release_resources(sc); 2811 return; 2812 } 2813 2814 /* 2815 * Get station address from the EEPROM 2816 */ 2817 if (bge_get_eaddr(sc, eaddr)) { 2818 aprint_error_dev(sc->bge_dev, 2819 "failed to read station address\n"); 2820 bge_release_resources(sc); 2821 return; 2822 } 2823 2824 br = bge_lookup_rev(sc->bge_chipid); 2825 2826 if (br == NULL) { 2827 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 2828 sc->bge_chipid); 2829 } else { 2830 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 2831 br->br_name, sc->bge_chipid); 2832 } 2833 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2834 2835 /* Allocate the general information block and ring buffers. */ 2836 if (pci_dma64_available(pa)) 2837 sc->bge_dmatag = pa->pa_dmat64; 2838 else 2839 sc->bge_dmatag = pa->pa_dmat; 2840 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2841 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2842 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2843 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2844 return; 2845 } 2846 DPRINTFN(5, ("bus_dmamem_map\n")); 2847 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2848 sizeof(struct bge_ring_data), &kva, 2849 BUS_DMA_NOWAIT)) { 2850 aprint_error_dev(sc->bge_dev, 2851 "can't map DMA buffers (%zu bytes)\n", 2852 sizeof(struct bge_ring_data)); 2853 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2854 return; 2855 } 2856 DPRINTFN(5, ("bus_dmamem_create\n")); 2857 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2858 sizeof(struct bge_ring_data), 0, 2859 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2860 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2861 bus_dmamem_unmap(sc->bge_dmatag, kva, 2862 sizeof(struct bge_ring_data)); 2863 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2864 return; 2865 } 2866 DPRINTFN(5, ("bus_dmamem_load\n")); 2867 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2868 sizeof(struct bge_ring_data), NULL, 2869 BUS_DMA_NOWAIT)) { 2870 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2871 bus_dmamem_unmap(sc->bge_dmatag, kva, 2872 sizeof(struct bge_ring_data)); 2873 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2874 return; 2875 } 2876 2877 DPRINTFN(5, ("bzero\n")); 2878 sc->bge_rdata = (struct bge_ring_data *)kva; 2879 2880 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2881 2882 /* Try to allocate memory for jumbo buffers. */ 2883 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2884 if (bge_alloc_jumbo_mem(sc)) { 2885 aprint_error_dev(sc->bge_dev, 2886 "jumbo buffer allocation failed\n"); 2887 } else 2888 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2889 } 2890 2891 /* Set default tuneable values. 
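	 *
	 * These coalescing defaults are what bge_blockinit() later loads
	 * into the BGE_HCC_*_COAL_TICKS / BGE_HCC_*_MAX_COAL_BDS host
	 * coalescing registers; the receive-side pair can presumably also
	 * be changed at run time through the sysctl nodes created by
	 * sysctl_bge_init() below (bge_intr() reprograms the chip on the
	 * next interrupt when bge_pending_rxintr_change is set).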
*/ 2892 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2893 sc->bge_rx_coal_ticks = 150; 2894 sc->bge_rx_max_coal_bds = 64; 2895 #ifdef ORIG_WPAUL_VALUES 2896 sc->bge_tx_coal_ticks = 150; 2897 sc->bge_tx_max_coal_bds = 128; 2898 #else 2899 sc->bge_tx_coal_ticks = 300; 2900 sc->bge_tx_max_coal_bds = 400; 2901 #endif 2902 if (BGE_IS_5705_PLUS(sc)) { 2903 sc->bge_tx_coal_ticks = (12 * 5); 2904 sc->bge_tx_max_coal_bds = (12 * 5); 2905 aprint_verbose_dev(sc->bge_dev, 2906 "setting short Tx thresholds\n"); 2907 } 2908 2909 if (BGE_IS_5705_PLUS(sc)) 2910 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2911 else 2912 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2913 2914 /* Set up ifnet structure */ 2915 ifp = &sc->ethercom.ec_if; 2916 ifp->if_softc = sc; 2917 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2918 ifp->if_ioctl = bge_ioctl; 2919 ifp->if_stop = bge_stop; 2920 ifp->if_start = bge_start; 2921 ifp->if_init = bge_init; 2922 ifp->if_watchdog = bge_watchdog; 2923 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2924 IFQ_SET_READY(&ifp->if_snd); 2925 DPRINTFN(5, ("strcpy if_xname\n")); 2926 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2927 2928 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 2929 sc->ethercom.ec_if.if_capabilities |= 2930 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 2931 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 2932 sc->ethercom.ec_if.if_capabilities |= 2933 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2934 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2935 #endif 2936 sc->ethercom.ec_capabilities |= 2937 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2938 2939 if (sc->bge_flags & BGE_TSO) 2940 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2941 2942 /* 2943 * Do MII setup. 2944 */ 2945 DPRINTFN(5, ("mii setup\n")); 2946 sc->bge_mii.mii_ifp = ifp; 2947 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2948 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2949 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2950 2951 /* 2952 * Figure out what sort of media we have by checking the 2953 * hardware config word in the first 32k of NIC internal memory, 2954 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2955 * cards, this value appears to be unset. If that's the 2956 * case, we have to rely on identifying the NIC by its PCI 2957 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2958 */ 2959 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2960 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2961 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 2962 bge_read_eeprom(sc, (void *)&hwcfg, 2963 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2964 hwcfg = be32toh(hwcfg); 2965 } 2966 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
*/ 2967 if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 || 2968 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 2969 if (BGE_IS_5714_FAMILY(sc)) 2970 sc->bge_flags |= BGE_PHY_FIBER_MII; 2971 else 2972 sc->bge_flags |= BGE_PHY_FIBER_TBI; 2973 } 2974 2975 /* set phyflags and chipid before mii_attach() */ 2976 dict = device_properties(self); 2977 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags); 2978 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid); 2979 2980 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 2981 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2982 bge_ifmedia_sts); 2983 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL); 2984 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX, 2985 0, NULL); 2986 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 2987 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 2988 /* Pretend the user requested this setting */ 2989 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2990 } else { 2991 /* 2992 * Do transceiver setup and tell the firmware the 2993 * driver is down so we can try to get access the 2994 * probe if ASF is running. Retry a couple of times 2995 * if we get a conflict with the ASF firmware accessing 2996 * the PHY. 2997 */ 2998 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2999 bge_asf_driver_up(sc); 3000 3001 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 3002 bge_ifmedia_sts); 3003 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff, 3004 MII_PHY_ANY, MII_OFFSET_ANY, 3005 MIIF_FORCEANEG|MIIF_DOPAUSE); 3006 3007 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) { 3008 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 3009 ifmedia_add(&sc->bge_mii.mii_media, 3010 IFM_ETHER|IFM_MANUAL, 0, NULL); 3011 ifmedia_set(&sc->bge_mii.mii_media, 3012 IFM_ETHER|IFM_MANUAL); 3013 } else 3014 ifmedia_set(&sc->bge_mii.mii_media, 3015 IFM_ETHER|IFM_AUTO); 3016 3017 /* 3018 * Now tell the firmware we are going up after probing the PHY 3019 */ 3020 if (sc->bge_asf_mode & ASF_STACKUP) 3021 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3022 } 3023 3024 /* 3025 * Call MI attach routine. 3026 */ 3027 DPRINTFN(5, ("if_attach\n")); 3028 if_attach(ifp); 3029 DPRINTFN(5, ("ether_ifattach\n")); 3030 ether_ifattach(ifp, eaddr); 3031 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb); 3032 #if NRND > 0 3033 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 3034 RND_TYPE_NET, 0); 3035 #endif 3036 #ifdef BGE_EVENT_COUNTERS 3037 /* 3038 * Attach event counters. 
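	 *
	 * (These are registered with evcnt_attach_dynamic() and show up
	 * under the device name, e.g. "bge0 intr", in "vmstat -e" output.)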
3039 */ 3040 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3041 NULL, device_xname(sc->bge_dev), "intr"); 3042 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3043 NULL, device_xname(sc->bge_dev), "tx_xoff"); 3044 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 3045 NULL, device_xname(sc->bge_dev), "tx_xon"); 3046 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 3047 NULL, device_xname(sc->bge_dev), "rx_xoff"); 3048 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 3049 NULL, device_xname(sc->bge_dev), "rx_xon"); 3050 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 3051 NULL, device_xname(sc->bge_dev), "rx_macctl"); 3052 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 3053 NULL, device_xname(sc->bge_dev), "xoffentered"); 3054 #endif /* BGE_EVENT_COUNTERS */ 3055 DPRINTFN(5, ("callout_init\n")); 3056 callout_init(&sc->bge_timeout, 0); 3057 3058 if (pmf_device_register(self, NULL, NULL)) 3059 pmf_class_network_register(self, ifp); 3060 else 3061 aprint_error_dev(self, "couldn't establish power handler\n"); 3062 3063 sysctl_bge_init(sc); 3064 3065 #ifdef BGE_DEBUG 3066 bge_debug_info(sc); 3067 #endif 3068 } 3069 3070 static void 3071 bge_release_resources(struct bge_softc *sc) 3072 { 3073 if (sc->bge_vpd_prodname != NULL) 3074 free(sc->bge_vpd_prodname, M_DEVBUF); 3075 3076 if (sc->bge_vpd_readonly != NULL) 3077 free(sc->bge_vpd_readonly, M_DEVBUF); 3078 } 3079 3080 static int 3081 bge_reset(struct bge_softc *sc) 3082 { 3083 uint32_t cachesize, command, pcistate, marbmode; 3084 #if 0 3085 uint32_t new_pcistate; 3086 #endif 3087 pcireg_t devctl, reg; 3088 int i, val; 3089 void (*write_op)(struct bge_softc *, int, int); 3090 3091 if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc) 3092 && (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 3093 if (sc->bge_flags & BGE_PCIE) 3094 write_op = bge_writemem_direct; 3095 else 3096 write_op = bge_writemem_ind; 3097 } else 3098 write_op = bge_writereg_ind; 3099 3100 /* Save some important PCI state. */ 3101 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 3102 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3103 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 3104 3105 /* Step 5a: Enable memory arbiter. */ 3106 marbmode = 0; 3107 if (BGE_IS_5714_FAMILY(sc)) 3108 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 3109 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 3110 3111 /* Step 5b-5d: */ 3112 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3113 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3114 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 3115 3116 /* XXX ???: Disable fastboot on controllers that support it. */ 3117 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 3118 BGE_IS_5755_PLUS(sc)) 3119 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 3120 3121 /* 3122 * Step 6: Write the magic number to SRAM at offset 0xB50. 3123 * When firmware finishes its initialization it will 3124 * write ~BGE_MAGIC_NUMBER to the same location. 
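	 *
	 * The matching wait is bge_poll_fw() above: except on the BCM5906
	 * (which polls BGE_VCPU_STATUS instead) and on boards flagged
	 * BGE_NO_EEPROM, it spins on roughly
	 *
	 *	val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
	 *	if (val == ~BGE_MAGIC_NUMBER)
	 *		break;
	 *
	 * so the write below is what arms that handshake.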
3125 */ 3126 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 3127 3128 /* Step 7: */ 3129 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 3130 /* 3131 * XXX: from FreeBSD/Linux; no documentation 3132 */ 3133 if (sc->bge_flags & BGE_PCIE) { 3134 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 3135 /* PCI Express 1.0 system */ 3136 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 3137 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3138 /* 3139 * Prevent PCI Express link training 3140 * during global reset. 3141 */ 3142 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 3143 val |= (1<<29); 3144 } 3145 } 3146 3147 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3148 i = CSR_READ_4(sc, BGE_VCPU_STATUS); 3149 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 3150 i | BGE_VCPU_STATUS_DRV_RESET); 3151 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 3152 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 3153 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 3154 } 3155 3156 /* 3157 * Set GPHY Power Down Override to leave GPHY 3158 * powered up in D0 uninitialized. 3159 */ 3160 if (BGE_IS_5705_PLUS(sc)) 3161 val |= BGE_MISCCFG_KEEP_GPHY_POWER; 3162 3163 /* XXX 5721, 5751 and 5752 */ 3164 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) 3165 val |= BGE_MISCCFG_GRC_RESET_DISABLE; 3166 3167 /* Issue global reset */ 3168 write_op(sc, BGE_MISC_CFG, val); 3169 3170 /* Step 8: wait for complete */ 3171 if (sc->bge_flags & BGE_PCIE) 3172 delay(100*1000); /* too big */ 3173 else 3174 delay(100); 3175 3176 /* From Linux: dummy read to flush PCI posted writes */ 3177 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3178 3179 /* Step 9-10: Reset some of the PCI state that got zapped by reset */ 3180 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3181 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3182 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW 3183 | BGE_PCIMISCCTL_CLOCKCTL_RW); 3184 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 3185 write_op(sc, BGE_MISC_CFG, (65 << 1)); 3186 3187 /* Step 11: disable PCI-X Relaxed Ordering. */ 3188 if (sc->bge_flags & BGE_PCIX) { 3189 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3190 + PCI_PCIX_CMD); 3191 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3192 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER); 3193 } 3194 3195 if (sc->bge_flags & BGE_PCIE) { 3196 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3197 DELAY(500000); 3198 /* XXX: Magic Numbers */ 3199 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3200 BGE_PCI_UNKNOWN0); 3201 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3202 BGE_PCI_UNKNOWN0, 3203 reg | (1 << 15)); 3204 } 3205 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3206 sc->bge_pciecap + PCI_PCIE_DCSR); 3207 /* Clear enable no snoop and disable relaxed ordering. */ 3208 devctl &= ~(0x0010 | PCI_PCIE_DCSR_ENA_NO_SNOOP); 3209 /* Set PCIE max payload size to 128. */ 3210 devctl &= ~(0x00e0); 3211 /* Clear device status register. Write 1b to clear */ 3212 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED 3213 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED; 3214 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3215 sc->bge_pciecap + PCI_PCIE_DCSR, devctl); 3216 } 3217 3218 /* Step 12: Enable memory arbiter. 
*/ 3219 marbmode = 0; 3220 if (BGE_IS_5714_FAMILY(sc)) 3221 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 3222 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 3223 3224 /* Step 17: Poll until the firmware initialization is complete */ 3225 bge_poll_fw(sc); 3226 3227 /* XXX 5721, 5751 and 5752 */ 3228 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) { 3229 /* Step 19: */ 3230 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25); 3231 /* Step 20: */ 3232 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT); 3233 } 3234 3235 /* 3236 * Step 18: wirte mac mode 3237 * XXX Write 0x0c for 5703S and 5704S 3238 */ 3239 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 3240 3241 3242 /* Step 21: 5822 B0 errata */ 3243 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) { 3244 pcireg_t msidata; 3245 3246 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3247 BGE_PCI_MSI_DATA); 3248 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16); 3249 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA, 3250 msidata); 3251 } 3252 3253 /* Step 23: restore cache line size */ 3254 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 3255 3256 #if 0 3257 /* 3258 * XXX Wait for the value of the PCISTATE register to 3259 * return to its original pre-reset state. This is a 3260 * fairly good indicator of reset completion. If we don't 3261 * wait for the reset to fully complete, trying to read 3262 * from the device's non-PCI registers may yield garbage 3263 * results. 3264 */ 3265 for (i = 0; i < BGE_TIMEOUT; i++) { 3266 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3267 BGE_PCI_PCISTATE); 3268 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 3269 (pcistate & ~BGE_PCISTATE_RESERVED)) 3270 break; 3271 DELAY(10); 3272 } 3273 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 3274 (pcistate & ~BGE_PCISTATE_RESERVED)) { 3275 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n"); 3276 } 3277 #endif 3278 3279 /* Step 28: Fix up byte swapping */ 3280 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 3281 3282 /* Tell the ASF firmware we are up */ 3283 if (sc->bge_asf_mode & ASF_STACKUP) 3284 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3285 3286 /* 3287 * The 5704 in TBI mode apparently needs some special 3288 * adjustment to insure the SERDES drive level is set 3289 * to 1.2V. 3290 */ 3291 if (sc->bge_flags & BGE_PHY_FIBER_TBI && 3292 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 3293 uint32_t serdescfg; 3294 3295 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 3296 serdescfg = (serdescfg & ~0xFFF) | 0x880; 3297 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 3298 } 3299 3300 if (sc->bge_flags & BGE_PCIE && 3301 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 3302 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 3303 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3304 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765) { 3305 uint32_t v; 3306 3307 /* Enable PCI Express bug fix */ 3308 v = CSR_READ_4(sc, 0x7c00); 3309 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 3310 } 3311 DELAY(10000); 3312 3313 return 0; 3314 } 3315 3316 /* 3317 * Frame reception handling. This is called if there's a frame 3318 * on the receive return list. 
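 *
 * In outline (a simplified sketch of the loop below): the status block
 * supplies the hardware's producer index and bge_rx_saved_considx is our
 * consumer index; after syncing the return ring we walk it until the two
 * indexes meet:
 *
 *	rx_prod = status_block.bge_idx[0].bge_rx_prod_idx;
 *	while (rx_cons != rx_prod) {
 *		cur_rx = &bge_rx_return_ring[rx_cons];
 *		... replenish the std or jumbo ring and hand the
 *		    mbuf to (*ifp->if_input)() ...
 *	}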
3319 * 3320 * Note: we have to be able to handle two possibilities here: 3321 * 1) the frame is from the jumbo receive ring 3322 * 2) the frame is from the standard receive ring 3323 */ 3324 3325 static void 3326 bge_rxeof(struct bge_softc *sc) 3327 { 3328 struct ifnet *ifp; 3329 uint16_t rx_prod, rx_cons; 3330 int stdcnt = 0, jumbocnt = 0; 3331 bus_dmamap_t dmamap; 3332 bus_addr_t offset, toff; 3333 bus_size_t tlen; 3334 int tosync; 3335 3336 rx_cons = sc->bge_rx_saved_considx; 3337 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 3338 3339 /* Nothing to do */ 3340 if (rx_cons == rx_prod) 3341 return; 3342 3343 ifp = &sc->ethercom.ec_if; 3344 3345 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3346 offsetof(struct bge_ring_data, bge_status_block), 3347 sizeof (struct bge_status_block), 3348 BUS_DMASYNC_POSTREAD); 3349 3350 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 3351 tosync = rx_prod - rx_cons; 3352 3353 #if NRND > 0 3354 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3355 rnd_add_uint32(&sc->rnd_source, tosync); 3356 #endif 3357 3358 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 3359 3360 if (tosync < 0) { 3361 tlen = (sc->bge_return_ring_cnt - rx_cons) * 3362 sizeof (struct bge_rx_bd); 3363 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3364 toff, tlen, BUS_DMASYNC_POSTREAD); 3365 tosync = -tosync; 3366 } 3367 3368 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3369 offset, tosync * sizeof (struct bge_rx_bd), 3370 BUS_DMASYNC_POSTREAD); 3371 3372 while (rx_cons != rx_prod) { 3373 struct bge_rx_bd *cur_rx; 3374 uint32_t rxidx; 3375 struct mbuf *m = NULL; 3376 3377 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 3378 3379 rxidx = cur_rx->bge_idx; 3380 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 3381 3382 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3383 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3384 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3385 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3386 jumbocnt++; 3387 bus_dmamap_sync(sc->bge_dmatag, 3388 sc->bge_cdata.bge_rx_jumbo_map, 3389 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3390 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3391 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3392 ifp->if_ierrors++; 3393 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3394 continue; 3395 } 3396 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3397 NULL)== ENOBUFS) { 3398 ifp->if_ierrors++; 3399 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3400 continue; 3401 } 3402 } else { 3403 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3404 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3405 3406 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3407 stdcnt++; 3408 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3409 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3410 if (dmamap == NULL) { 3411 ifp->if_ierrors++; 3412 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3413 continue; 3414 } 3415 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3416 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3417 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3418 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3419 ifp->if_ierrors++; 3420 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3421 continue; 3422 } 3423 if (bge_newbuf_std(sc, sc->bge_std, 3424 NULL, dmamap) == ENOBUFS) { 3425 ifp->if_ierrors++; 3426 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3427 continue; 3428 } 3429 } 3430 3431 ifp->if_ipackets++; 3432 #ifndef __NO_STRICT_ALIGNMENT 3433 /* 3434 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3435 * the Rx buffer has the layer-2 header unaligned. 
3436 * If our CPU requires alignment, re-align by copying. 3437 */ 3438 if (sc->bge_flags & BGE_RX_ALIGNBUG) { 3439 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3440 cur_rx->bge_len); 3441 m->m_data += ETHER_ALIGN; 3442 } 3443 #endif 3444 3445 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3446 m->m_pkthdr.rcvif = ifp; 3447 3448 /* 3449 * Handle BPF listeners. Let the BPF user see the packet. 3450 */ 3451 bpf_mtap(ifp, m); 3452 3453 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3454 3455 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3456 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3457 /* 3458 * Rx transport checksum-offload may also 3459 * have bugs with packets which, when transmitted, 3460 * were `runts' requiring padding. 3461 */ 3462 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3463 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3464 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3465 m->m_pkthdr.csum_data = 3466 cur_rx->bge_tcp_udp_csum; 3467 m->m_pkthdr.csum_flags |= 3468 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3469 M_CSUM_DATA); 3470 } 3471 3472 /* 3473 * If we received a packet with a vlan tag, pass it 3474 * to vlan_input() instead of ether_input(). 3475 */ 3476 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 3477 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3478 } 3479 3480 (*ifp->if_input)(ifp, m); 3481 } 3482 3483 sc->bge_rx_saved_considx = rx_cons; 3484 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3485 if (stdcnt) 3486 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3487 if (jumbocnt) 3488 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3489 } 3490 3491 static void 3492 bge_txeof(struct bge_softc *sc) 3493 { 3494 struct bge_tx_bd *cur_tx = NULL; 3495 struct ifnet *ifp; 3496 struct txdmamap_pool_entry *dma; 3497 bus_addr_t offset, toff; 3498 bus_size_t tlen; 3499 int tosync; 3500 struct mbuf *m; 3501 3502 ifp = &sc->ethercom.ec_if; 3503 3504 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3505 offsetof(struct bge_ring_data, bge_status_block), 3506 sizeof (struct bge_status_block), 3507 BUS_DMASYNC_POSTREAD); 3508 3509 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3510 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3511 sc->bge_tx_saved_considx; 3512 3513 #if NRND > 0 3514 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3515 rnd_add_uint32(&sc->rnd_source, tosync); 3516 #endif 3517 3518 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3519 3520 if (tosync < 0) { 3521 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3522 sizeof (struct bge_tx_bd); 3523 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3524 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3525 tosync = -tosync; 3526 } 3527 3528 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3529 offset, tosync * sizeof (struct bge_tx_bd), 3530 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3531 3532 /* 3533 * Go through our tx ring and free mbufs for those 3534 * frames that have been sent. 
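	 *
	 * (In outline: bge_tx_saved_considx chases the status block's
	 * bge_tx_cons_idx; for each completed slot we unload and free the
	 * mbuf, if any, return its DMA map to txdma_list, and decrement
	 * bge_txcnt.)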
3535 */ 3536 while (sc->bge_tx_saved_considx != 3537 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3538 uint32_t idx = 0; 3539 3540 idx = sc->bge_tx_saved_considx; 3541 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3542 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3543 ifp->if_opackets++; 3544 m = sc->bge_cdata.bge_tx_chain[idx]; 3545 if (m != NULL) { 3546 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3547 dma = sc->txdma[idx]; 3548 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3549 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3550 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3551 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3552 sc->txdma[idx] = NULL; 3553 3554 m_freem(m); 3555 } 3556 sc->bge_txcnt--; 3557 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3558 ifp->if_timer = 0; 3559 } 3560 3561 if (cur_tx != NULL) 3562 ifp->if_flags &= ~IFF_OACTIVE; 3563 } 3564 3565 static int 3566 bge_intr(void *xsc) 3567 { 3568 struct bge_softc *sc; 3569 struct ifnet *ifp; 3570 uint32_t statusword; 3571 3572 sc = xsc; 3573 ifp = &sc->ethercom.ec_if; 3574 3575 /* It is possible for the interrupt to arrive before 3576 * the status block has been updated. 3577 * Reading the PCI State register will confirm whether the 3578 * interrupt is ours and will flush the status block. 3579 */ 3580 3581 /* read status word from status block */ 3582 statusword = sc->bge_rdata->bge_status_block.bge_status; 3583 3584 if ((statusword & BGE_STATFLAG_UPDATED) || 3585 (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) { 3586 /* Ack interrupt and stop others from occurring. */ 3587 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3588 3589 BGE_EVCNT_INCR(sc->bge_ev_intr); 3590 3591 /* clear status word */ 3592 sc->bge_rdata->bge_status_block.bge_status = 0; 3593 3594 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3595 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 3596 BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 3597 bge_link_upd(sc); 3598 3599 if (ifp->if_flags & IFF_RUNNING) { 3600 /* Check RX return ring producer/consumer */ 3601 bge_rxeof(sc); 3602 3603 /* Check TX ring producer/consumer */ 3604 bge_txeof(sc); 3605 } 3606 3607 if (sc->bge_pending_rxintr_change) { 3608 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3609 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3610 uint32_t junk; 3611 3612 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3613 DELAY(10); 3614 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3615 3616 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3617 DELAY(10); 3618 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3619 3620 sc->bge_pending_rxintr_change = 0; 3621 } 3622 bge_handle_events(sc); 3623 3624 /* Re-enable interrupts. */ 3625 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3626 3627 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3628 bge_start(ifp); 3629 3630 return 1; 3631 } else 3632 return 0; 3633 } 3634 3635 static void 3636 bge_asf_driver_up(struct bge_softc *sc) 3637 { 3638 if (sc->bge_asf_mode & ASF_STACKUP) { 3639 /* Send ASF heartbeat approx.
every 2s */ 3640 if (sc->bge_asf_count) 3641 sc->bge_asf_count --; 3642 else { 3643 sc->bge_asf_count = 2; 3644 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 3645 BGE_FW_DRV_ALIVE); 3646 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 3647 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 3648 CSR_WRITE_4(sc, BGE_CPU_EVENT, 3649 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 3650 } 3651 } 3652 } 3653 3654 static void 3655 bge_tick(void *xsc) 3656 { 3657 struct bge_softc *sc = xsc; 3658 struct mii_data *mii = &sc->bge_mii; 3659 int s; 3660 3661 s = splnet(); 3662 3663 if (BGE_IS_5705_PLUS(sc)) 3664 bge_stats_update_regs(sc); 3665 else 3666 bge_stats_update(sc); 3667 3668 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 3669 /* 3670 * Since in TBI mode auto-polling can't be used we should poll 3671 * link status manually. Here we register pending link event 3672 * and trigger interrupt. 3673 */ 3674 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 3675 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3676 } else { 3677 /* 3678 * Do not touch PHY if we have link up. This could break 3679 * IPMI/ASF mode or produce extra input errors. 3680 * (extra input errors was reported for bcm5701 & bcm5704). 3681 */ 3682 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 3683 mii_tick(mii); 3684 } 3685 3686 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3687 3688 splx(s); 3689 } 3690 3691 static void 3692 bge_stats_update_regs(struct bge_softc *sc) 3693 { 3694 struct ifnet *ifp = &sc->ethercom.ec_if; 3695 3696 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 3697 offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 3698 3699 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3700 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 3701 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3702 } 3703 3704 static void 3705 bge_stats_update(struct bge_softc *sc) 3706 { 3707 struct ifnet *ifp = &sc->ethercom.ec_if; 3708 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3709 3710 #define READ_STAT(sc, stats, stat) \ 3711 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3712 3713 ifp->if_collisions += 3714 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3715 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3716 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3717 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3718 ifp->if_collisions; 3719 3720 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3721 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3722 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3723 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3724 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3725 READ_STAT(sc, stats, 3726 xoffPauseFramesReceived.bge_addr_lo)); 3727 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3728 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3729 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3730 READ_STAT(sc, stats, 3731 macControlFramesReceived.bge_addr_lo)); 3732 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3733 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3734 3735 #undef READ_STAT 3736 3737 #ifdef notdef 3738 ifp->if_collisions += 3739 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3740 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3741 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3742 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3743 ifp->if_collisions; 3744 #endif 3745 } 3746 3747 /* 3748 * Pad outbound frame to ETHER_MIN_NOPAD for an 
unusual reason. 3749 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3750 * but when such padded frames employ the bge IP/TCP checksum offload, 3751 * the hardware checksum assist gives incorrect results (possibly 3752 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3753 * If we pad such runts with zeros, the onboard checksum comes out correct. 3754 */ 3755 static inline int 3756 bge_cksum_pad(struct mbuf *pkt) 3757 { 3758 struct mbuf *last = NULL; 3759 int padlen; 3760 3761 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3762 3763 /* if there's only the packet-header and we can pad there, use it. */ 3764 if (pkt->m_pkthdr.len == pkt->m_len && 3765 M_TRAILINGSPACE(pkt) >= padlen) { 3766 last = pkt; 3767 } else { 3768 /* 3769 * Walk packet chain to find last mbuf. We will either 3770 * pad there, or append a new mbuf and pad it 3771 * (thus perhaps avoiding the bcm5700 dma-min bug). 3772 */ 3773 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3774 continue; /* do nothing */ 3775 } 3776 3777 /* `last' now points to last in chain. */ 3778 if (M_TRAILINGSPACE(last) < padlen) { 3779 /* Allocate new empty mbuf, pad it. Compact later. */ 3780 struct mbuf *n; 3781 MGET(n, M_DONTWAIT, MT_DATA); 3782 if (n == NULL) 3783 return ENOBUFS; 3784 n->m_len = 0; 3785 last->m_next = n; 3786 last = n; 3787 } 3788 } 3789 3790 KDASSERT(!M_READONLY(last)); 3791 KDASSERT(M_TRAILINGSPACE(last) >= padlen); 3792 3793 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3794 memset(mtod(last, char *) + last->m_len, 0, padlen); 3795 last->m_len += padlen; 3796 pkt->m_pkthdr.len += padlen; 3797 return 0; 3798 } 3799 3800 /* 3801 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3802 */ 3803 static inline int 3804 bge_compact_dma_runt(struct mbuf *pkt) 3805 { 3806 struct mbuf *m, *prev; 3807 int totlen, prevlen; 3808 3809 prev = NULL; 3810 totlen = 0; 3811 prevlen = -1; 3812 3813 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3814 int mlen = m->m_len; 3815 int shortfall = 8 - mlen ; 3816 3817 totlen += mlen; 3818 if (mlen == 0) { 3819 continue; 3820 } 3821 if (mlen >= 8) 3822 continue; 3823 3824 /* If we get here, mbuf data is too small for DMA engine. 3825 * Try to fix by shuffling data to prev or next in chain. 3826 * If that fails, do a compacting deep-copy of the whole chain. 3827 */ 3828 3829 /* Internal frag. If fits in prev, copy it there. */ 3830 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 3831 memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 3832 prev->m_len += mlen; 3833 m->m_len = 0; 3834 /* XXX stitch chain */ 3835 prev->m_next = m_free(m); 3836 m = prev; 3837 continue; 3838 } 3839 else if (m->m_next != NULL && 3840 M_TRAILINGSPACE(m) >= shortfall && 3841 m->m_next->m_len >= (8 + shortfall)) { 3842 /* m is writable and have enough data in next, pull up. */ 3843 3844 memcpy(m->m_data + m->m_len, m->m_next->m_data, 3845 shortfall); 3846 m->m_len += shortfall; 3847 m->m_next->m_len -= shortfall; 3848 m->m_next->m_data += shortfall; 3849 } 3850 else if (m->m_next == NULL || 1) { 3851 /* Got a runt at the very end of the packet. 3852 * borrow data from the tail of the preceding mbuf and 3853 * update its length in-place. (The original data is still 3854 * valid, so we can do this even if prev is not writable.) 3855 */ 3856 3857 /* if we'd make prev a runt, just move all of its data. 
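 * In other words: if stealing `shortfall' bytes would itself leave prev
 * with fewer than 8 bytes, steal all of prev->m_len, leaving prev empty.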
*/ 3858 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3859 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3860 3861 if ((prev->m_len - shortfall) < 8) 3862 shortfall = prev->m_len; 3863 3864 #ifdef notyet /* just do the safe slow thing for now */ 3865 if (!M_READONLY(m)) { 3866 if (M_LEADINGSPACE(m) < shortfall) { 3867 void *m_dat; 3868 m_dat = (m->m_flags & M_PKTHDR) ? 3869 m->m_pktdat : m->m_dat; 3870 memmove(m_dat, mtod(m, void*), m->m_len); 3871 m->m_data = m_dat; 3872 } 3873 } else 3874 #endif /* just do the safe slow thing */ 3875 { 3876 struct mbuf * n = NULL; 3877 int newprevlen = prev->m_len - shortfall; 3878 3879 MGET(n, M_NOWAIT, MT_DATA); 3880 if (n == NULL) 3881 return ENOBUFS; 3882 KASSERT(m->m_len + shortfall < MLEN 3883 /*, 3884 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3885 3886 /* first copy the data we're stealing from prev */ 3887 memcpy(n->m_data, prev->m_data + newprevlen, 3888 shortfall); 3889 3890 /* update prev->m_len accordingly */ 3891 prev->m_len -= shortfall; 3892 3893 /* copy data from runt m */ 3894 memcpy(n->m_data + shortfall, m->m_data, 3895 m->m_len); 3896 3897 /* n holds what we stole from prev, plus m */ 3898 n->m_len = shortfall + m->m_len; 3899 3900 /* stitch n into chain and free m */ 3901 n->m_next = m->m_next; 3902 prev->m_next = n; 3903 /* KASSERT(m->m_next == NULL); */ 3904 m->m_next = NULL; 3905 m_free(m); 3906 m = n; /* for continuing loop */ 3907 } 3908 } 3909 prevlen = m->m_len; 3910 } 3911 return 0; 3912 } 3913 3914 /* 3915 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3916 * pointers to descriptors. 3917 */ 3918 static int 3919 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx) 3920 { 3921 struct bge_tx_bd *f = NULL; 3922 uint32_t frag, cur; 3923 uint16_t csum_flags = 0; 3924 uint16_t txbd_tso_flags = 0; 3925 struct txdmamap_pool_entry *dma; 3926 bus_dmamap_t dmamap; 3927 int i = 0; 3928 struct m_tag *mtag; 3929 int use_tso, maxsegsize, error; 3930 3931 cur = frag = *txidx; 3932 3933 if (m_head->m_pkthdr.csum_flags) { 3934 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3935 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3936 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3937 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3938 } 3939 3940 /* 3941 * If we were asked to do an outboard checksum, and the NIC 3942 * has the bug where it sometimes adds in the Ethernet padding, 3943 * explicitly pad with zeros so the cksum will be correct either way. 3944 * (For now, do this for all chip versions, until newer 3945 * are confirmed to not require the workaround.) 3946 */ 3947 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3948 #ifdef notyet 3949 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3950 #endif 3951 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3952 goto check_dma_bug; 3953 3954 if (bge_cksum_pad(m_head) != 0) 3955 return ENOBUFS; 3956 3957 check_dma_bug: 3958 if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)) 3959 goto doit; 3960 3961 /* 3962 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3963 * less than eight bytes. If we encounter a teeny mbuf 3964 * at the end of a chain, we can pad. Otherwise, copy. 3965 */ 3966 if (bge_compact_dma_runt(m_head) != 0) 3967 return ENOBUFS; 3968 3969 doit: 3970 dma = SLIST_FIRST(&sc->txdma_list); 3971 if (dma == NULL) 3972 return ENOBUFS; 3973 dmamap = dma->dmamap; 3974 3975 /* 3976 * Set up any necessary TSO state before we start packing...
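 * Roughly: when M_CSUM_TSOv4 is set we locate the IP and TCP headers in
 * the first mbuf, count any IP/TCP option words, clear or pre-seed th_sum
 * depending on the chip generation, and fold the MSS together with the
 * chip-specific option-length encoding into `maxsegsize' for use in the
 * descriptor loop below.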
3977 */ 3978 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3979 if (!use_tso) { 3980 maxsegsize = 0; 3981 } else { /* TSO setup */ 3982 unsigned mss; 3983 struct ether_header *eh; 3984 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3985 struct mbuf * m0 = m_head; 3986 struct ip *ip; 3987 struct tcphdr *th; 3988 int iphl, hlen; 3989 3990 /* 3991 * XXX It would be nice if the mbuf pkthdr had offset 3992 * fields for the protocol headers. 3993 */ 3994 3995 eh = mtod(m0, struct ether_header *); 3996 switch (htons(eh->ether_type)) { 3997 case ETHERTYPE_IP: 3998 offset = ETHER_HDR_LEN; 3999 break; 4000 4001 case ETHERTYPE_VLAN: 4002 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 4003 break; 4004 4005 default: 4006 /* 4007 * Don't support this protocol or encapsulation. 4008 */ 4009 return ENOBUFS; 4010 } 4011 4012 /* 4013 * TCP/IP headers are in the first mbuf; we can do 4014 * this the easy way. 4015 */ 4016 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 4017 hlen = iphl + offset; 4018 if (__predict_false(m0->m_len < 4019 (hlen + sizeof(struct tcphdr)))) { 4020 4021 aprint_debug_dev(sc->bge_dev, 4022 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 4023 "not handled yet\n", 4024 m0->m_len, hlen+ sizeof(struct tcphdr)); 4025 #ifdef NOTYET 4026 /* 4027 * XXX jonathan@NetBSD.org: untested. 4028 * how to force this branch to be taken? 4029 */ 4030 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 4031 4032 m_copydata(m0, offset, sizeof(ip), &ip); 4033 m_copydata(m0, hlen, sizeof(th), &th); 4034 4035 ip.ip_len = 0; 4036 4037 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 4038 sizeof(ip.ip_len), &ip.ip_len); 4039 4040 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 4041 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 4042 4043 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 4044 sizeof(th.th_sum), &th.th_sum); 4045 4046 hlen += th.th_off << 2; 4047 iptcp_opt_words = hlen; 4048 #else 4049 /* 4050 * if_wm "hard" case not yet supported, can we not 4051 * mandate it out of existence? 4052 */ 4053 (void) ip; (void)th; (void) ip_tcp_hlen; 4054 4055 return ENOBUFS; 4056 #endif 4057 } else { 4058 ip = (struct ip *) (mtod(m0, char *) + offset); 4059 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 4060 ip_tcp_hlen = iphl + (th->th_off << 2); 4061 4062 /* Total IP/TCP options, in 32-bit words */ 4063 iptcp_opt_words = (ip_tcp_hlen 4064 - sizeof(struct tcphdr) 4065 - sizeof(struct ip)) >> 2; 4066 } 4067 if (BGE_IS_5750_OR_BEYOND(sc)) { 4068 th->th_sum = 0; 4069 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 4070 } else { 4071 /* 4072 * XXX jonathan@NetBSD.org: 5705 untested. 4073 * Requires TSO firmware patch for 5701/5703/5704. 4074 */ 4075 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 4076 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 4077 } 4078 4079 mss = m_head->m_pkthdr.segsz; 4080 txbd_tso_flags |= 4081 BGE_TXBDFLAG_CPU_PRE_DMA | 4082 BGE_TXBDFLAG_CPU_POST_DMA; 4083 4084 /* 4085 * Our NIC TSO-assist assumes TSO has standard, optionless 4086 * IPv4 and TCP headers, which total 40 bytes. By default, 4087 * the NIC copies 40 bytes of IP/TCP header from the 4088 * supplied header into the IP/TCP header portion of 4089 * each post-TSO-segment. If the supplied packet has IP or 4090 * TCP options, we need to tell the NIC to copy those extra 4091 * bytes into each post-TSO header, in addition to the normal 4092 * 40-byte IP/TCP header (and to leave space accordingly). 4093 * Unfortunately, the driver encoding of option length 4094 * varies across different ASIC families. 
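 * For example: a 20-byte IP header and a TCP header carrying 12 bytes of
 * options give ip_tcp_hlen = 52, so iptcp_opt_words = (52 - 20 - 20) >> 2
 * = 3.  On 5705-plus parts that count is folded into the per-descriptor
 * segment-size word below (maxsegsize = mss | (3 << 11)); on older parts
 * it is OR'd into the TX BD flags instead (iptcp_opt_words << 12).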
4095 */ 4096 tcp_seg_flags = 0; 4097 if (iptcp_opt_words) { 4098 if (BGE_IS_5705_PLUS(sc)) { 4099 tcp_seg_flags = 4100 iptcp_opt_words << 11; 4101 } else { 4102 txbd_tso_flags |= 4103 iptcp_opt_words << 12; 4104 } 4105 } 4106 maxsegsize = mss | tcp_seg_flags; 4107 ip->ip_len = htons(mss + ip_tcp_hlen); 4108 4109 } /* TSO setup */ 4110 4111 /* 4112 * Start packing the mbufs in this chain into 4113 * the fragment pointers. Stop when we run out 4114 * of fragments or hit the end of the mbuf chain. 4115 */ 4116 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 4117 BUS_DMA_NOWAIT); 4118 if (error) 4119 return ENOBUFS; 4120 /* 4121 * Sanity check: avoid coming within 16 descriptors 4122 * of the end of the ring. 4123 */ 4124 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 4125 BGE_TSO_PRINTF(("%s: " 4126 " dmamap_load_mbuf too close to ring wrap\n", 4127 device_xname(sc->bge_dev))); 4128 goto fail_unload; 4129 } 4130 4131 mtag = sc->ethercom.ec_nvlans ? 4132 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 4133 4134 4135 /* Iterate over dmap-map fragments. */ 4136 for (i = 0; i < dmamap->dm_nsegs; i++) { 4137 f = &sc->bge_rdata->bge_tx_ring[frag]; 4138 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 4139 break; 4140 4141 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); 4142 f->bge_len = dmamap->dm_segs[i].ds_len; 4143 4144 /* 4145 * For 5751 and follow-ons, for TSO we must turn 4146 * off checksum-assist flag in the tx-descr, and 4147 * supply the ASIC-revision-specific encoding 4148 * of TSO flags and segsize. 4149 */ 4150 if (use_tso) { 4151 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) { 4152 f->bge_rsvd = maxsegsize; 4153 f->bge_flags = csum_flags | txbd_tso_flags; 4154 } else { 4155 f->bge_rsvd = 0; 4156 f->bge_flags = 4157 (csum_flags | txbd_tso_flags) & 0x0fff; 4158 } 4159 } else { 4160 f->bge_rsvd = 0; 4161 f->bge_flags = csum_flags; 4162 } 4163 4164 if (mtag != NULL) { 4165 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 4166 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 4167 } else { 4168 f->bge_vlan_tag = 0; 4169 } 4170 cur = frag; 4171 BGE_INC(frag, BGE_TX_RING_CNT); 4172 } 4173 4174 if (i < dmamap->dm_nsegs) { 4175 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 4176 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 4177 goto fail_unload; 4178 } 4179 4180 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 4181 BUS_DMASYNC_PREWRITE); 4182 4183 if (frag == sc->bge_tx_saved_considx) { 4184 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 4185 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 4186 4187 goto fail_unload; 4188 } 4189 4190 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 4191 sc->bge_cdata.bge_tx_chain[cur] = m_head; 4192 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 4193 sc->txdma[cur] = dma; 4194 sc->bge_txcnt += dmamap->dm_nsegs; 4195 4196 *txidx = frag; 4197 4198 return 0; 4199 4200 fail_unload: 4201 bus_dmamap_unload(sc->bge_dmatag, dmamap); 4202 4203 return ENOBUFS; 4204 } 4205 4206 /* 4207 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 4208 * to the mbuf data regions directly in the transmit descriptors. 
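 * The loop below polls frames off the send queue, lets bge_encap() pad or
 * compact the chain and fill descriptors, and only then dequeues them; the
 * updated producer index is written to the TX mailbox (twice on 5700 BX
 * silicon, per the errata noted below) and a 5-second watchdog is armed.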
4209 */ 4210 static void 4211 bge_start(struct ifnet *ifp) 4212 { 4213 struct bge_softc *sc; 4214 struct mbuf *m_head = NULL; 4215 uint32_t prodidx; 4216 int pkts = 0; 4217 4218 sc = ifp->if_softc; 4219 4220 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 4221 return; 4222 4223 prodidx = sc->bge_tx_prodidx; 4224 4225 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 4226 IFQ_POLL(&ifp->if_snd, m_head); 4227 if (m_head == NULL) 4228 break; 4229 4230 #if 0 4231 /* 4232 * XXX 4233 * safety overkill. If this is a fragmented packet chain 4234 * with delayed TCP/UDP checksums, then only encapsulate 4235 * it if we have enough descriptors to handle the entire 4236 * chain at once. 4237 * (paranoia -- may not actually be needed) 4238 */ 4239 if (m_head->m_flags & M_FIRSTFRAG && 4240 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 4241 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 4242 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 4243 ifp->if_flags |= IFF_OACTIVE; 4244 break; 4245 } 4246 } 4247 #endif 4248 4249 /* 4250 * Pack the data into the transmit ring. If we 4251 * don't have room, set the OACTIVE flag and wait 4252 * for the NIC to drain the ring. 4253 */ 4254 if (bge_encap(sc, m_head, &prodidx)) { 4255 ifp->if_flags |= IFF_OACTIVE; 4256 break; 4257 } 4258 4259 /* now we are committed to transmit the packet */ 4260 IFQ_DEQUEUE(&ifp->if_snd, m_head); 4261 pkts++; 4262 4263 /* 4264 * If there's a BPF listener, bounce a copy of this frame 4265 * to him. 4266 */ 4267 bpf_mtap(ifp, m_head); 4268 } 4269 if (pkts == 0) 4270 return; 4271 4272 /* Transmit */ 4273 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 4274 /* 5700 b2 errata */ 4275 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 4276 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 4277 4278 sc->bge_tx_prodidx = prodidx; 4279 4280 /* 4281 * Set a timeout in case the chip goes out to lunch. 4282 */ 4283 ifp->if_timer = 5; 4284 } 4285 4286 static int 4287 bge_init(struct ifnet *ifp) 4288 { 4289 struct bge_softc *sc = ifp->if_softc; 4290 const uint16_t *m; 4291 int s, error = 0; 4292 4293 s = splnet(); 4294 4295 ifp = &sc->ethercom.ec_if; 4296 4297 /* Cancel pending I/O and flush buffers. */ 4298 bge_stop(ifp, 0); 4299 4300 bge_stop_fw(sc); 4301 bge_sig_pre_reset(sc, BGE_RESET_START); 4302 bge_reset(sc); 4303 bge_sig_legacy(sc, BGE_RESET_START); 4304 bge_sig_post_reset(sc, BGE_RESET_START); 4305 4306 bge_chipinit(sc); 4307 4308 /* 4309 * Init the various state machines, ring 4310 * control blocks and firmware. 4311 */ 4312 error = bge_blockinit(sc); 4313 if (error != 0) { 4314 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 4315 error); 4316 splx(s); 4317 return error; 4318 } 4319 4320 ifp = &sc->ethercom.ec_if; 4321 4322 /* Specify MTU. */ 4323 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 4324 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 4325 4326 /* Load our MAC address. */ 4327 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 4328 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 4329 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 4330 4331 /* Enable or disable promiscuous mode as needed. */ 4332 if (ifp->if_flags & IFF_PROMISC) 4333 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4334 else 4335 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4336 4337 /* Program multicast filter. */ 4338 bge_setmulti(sc); 4339 4340 /* Init RX ring. */ 4341 bge_init_rx_ring_std(sc); 4342 4343 /* 4344 * Workaround for a bug in 5705 ASIC rev A0. 
Poll the NIC's 4345 * memory to insure that the chip has in fact read the first 4346 * entry of the ring. 4347 */ 4348 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 4349 uint32_t v, i; 4350 for (i = 0; i < 10; i++) { 4351 DELAY(20); 4352 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 4353 if (v == (MCLBYTES - ETHER_ALIGN)) 4354 break; 4355 } 4356 if (i == 10) 4357 aprint_error_dev(sc->bge_dev, 4358 "5705 A0 chip failed to load RX ring\n"); 4359 } 4360 4361 /* Init jumbo RX ring. */ 4362 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 4363 bge_init_rx_ring_jumbo(sc); 4364 4365 /* Init our RX return ring index */ 4366 sc->bge_rx_saved_considx = 0; 4367 4368 /* Init TX ring. */ 4369 bge_init_tx_ring(sc); 4370 4371 /* Turn on transmitter */ 4372 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 4373 4374 /* Turn on receiver */ 4375 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4376 4377 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 4378 4379 /* Tell firmware we're alive. */ 4380 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4381 4382 /* Enable host interrupts. */ 4383 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 4384 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4385 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 4386 4387 if ((error = bge_ifmedia_upd(ifp)) != 0) 4388 goto out; 4389 4390 ifp->if_flags |= IFF_RUNNING; 4391 ifp->if_flags &= ~IFF_OACTIVE; 4392 4393 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 4394 4395 out: 4396 sc->bge_if_flags = ifp->if_flags; 4397 splx(s); 4398 4399 return error; 4400 } 4401 4402 /* 4403 * Set media options. 4404 */ 4405 static int 4406 bge_ifmedia_upd(struct ifnet *ifp) 4407 { 4408 struct bge_softc *sc = ifp->if_softc; 4409 struct mii_data *mii = &sc->bge_mii; 4410 struct ifmedia *ifm = &sc->bge_ifmedia; 4411 int rc; 4412 4413 /* If this is a 1000baseX NIC, enable the TBI port. */ 4414 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4415 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 4416 return EINVAL; 4417 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4418 case IFM_AUTO: 4419 /* 4420 * The BCM5704 ASIC appears to have a special 4421 * mechanism for programming the autoneg 4422 * advertisement registers in TBI mode. 4423 */ 4424 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 4425 uint32_t sgdig; 4426 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 4427 if (sgdig & BGE_SGDIGSTS_DONE) { 4428 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4429 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4430 sgdig |= BGE_SGDIGCFG_AUTO | 4431 BGE_SGDIGCFG_PAUSE_CAP | 4432 BGE_SGDIGCFG_ASYM_PAUSE; 4433 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4434 sgdig | BGE_SGDIGCFG_SEND); 4435 DELAY(5); 4436 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4437 } 4438 } 4439 break; 4440 case IFM_1000_SX: 4441 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 4442 BGE_CLRBIT(sc, BGE_MAC_MODE, 4443 BGE_MACMODE_HALF_DUPLEX); 4444 } else { 4445 BGE_SETBIT(sc, BGE_MAC_MODE, 4446 BGE_MACMODE_HALF_DUPLEX); 4447 } 4448 break; 4449 default: 4450 return EINVAL; 4451 } 4452 /* XXX 802.3x flow control for 1000BASE-SX */ 4453 return 0; 4454 } 4455 4456 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 4457 if ((rc = mii_mediachg(mii)) == ENXIO) 4458 return 0; 4459 4460 /* 4461 * Force an interrupt so that we will call bge_link_upd 4462 * if needed and clear any pending link state attention. 4463 * Without this we are not getting any further interrupts 4464 * for link state changes and thus will not UP the link and 4465 * not be able to send in bge_start. 
The only way to get 4466 * things working was to receive a packet and get a RX intr. 4467 */ 4468 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4469 sc->bge_flags & BGE_IS_5788) 4470 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4471 else 4472 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4473 4474 return rc; 4475 } 4476 4477 /* 4478 * Report current media status. 4479 */ 4480 static void 4481 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4482 { 4483 struct bge_softc *sc = ifp->if_softc; 4484 struct mii_data *mii = &sc->bge_mii; 4485 4486 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4487 ifmr->ifm_status = IFM_AVALID; 4488 ifmr->ifm_active = IFM_ETHER; 4489 if (CSR_READ_4(sc, BGE_MAC_STS) & 4490 BGE_MACSTAT_TBI_PCS_SYNCHED) 4491 ifmr->ifm_status |= IFM_ACTIVE; 4492 ifmr->ifm_active |= IFM_1000_SX; 4493 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 4494 ifmr->ifm_active |= IFM_HDX; 4495 else 4496 ifmr->ifm_active |= IFM_FDX; 4497 return; 4498 } 4499 4500 mii_pollstat(mii); 4501 ifmr->ifm_status = mii->mii_media_status; 4502 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4503 sc->bge_flowflags; 4504 } 4505 4506 static int 4507 bge_ifflags_cb(struct ethercom *ec) 4508 { 4509 struct ifnet *ifp = &ec->ec_if; 4510 struct bge_softc *sc = ifp->if_softc; 4511 int change = ifp->if_flags ^ sc->bge_if_flags; 4512 4513 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 4514 return ENETRESET; 4515 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 4516 return 0; 4517 4518 if ((ifp->if_flags & IFF_PROMISC) == 0) 4519 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4520 else 4521 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4522 4523 bge_setmulti(sc); 4524 4525 sc->bge_if_flags = ifp->if_flags; 4526 return 0; 4527 } 4528 4529 static int 4530 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 4531 { 4532 struct bge_softc *sc = ifp->if_softc; 4533 struct ifreq *ifr = (struct ifreq *) data; 4534 int s, error = 0; 4535 struct mii_data *mii; 4536 4537 s = splnet(); 4538 4539 switch (command) { 4540 case SIOCSIFMEDIA: 4541 /* XXX Flow control is not supported for 1000BASE-SX */ 4542 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4543 ifr->ifr_media &= ~IFM_ETH_FMASK; 4544 sc->bge_flowflags = 0; 4545 } 4546 4547 /* Flow control requires full-duplex mode. */ 4548 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 4549 (ifr->ifr_media & IFM_FDX) == 0) { 4550 ifr->ifr_media &= ~IFM_ETH_FMASK; 4551 } 4552 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 4553 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 4554 /* We can do both TXPAUSE and RXPAUSE. 
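 * A bare IFM_FLOW request is widened to symmetric 802.3x
 * pause here before the flags are saved in bge_flowflags.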
*/ 4555 ifr->ifr_media |= 4556 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 4557 } 4558 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 4559 } 4560 /* FALLTHROUGH */ 4561 case SIOCGIFMEDIA: 4562 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4563 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 4564 command); 4565 } else { 4566 mii = &sc->bge_mii; 4567 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 4568 command); 4569 } 4570 break; 4571 default: 4572 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 4573 break; 4574 4575 error = 0; 4576 4577 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 4578 ; 4579 else if (ifp->if_flags & IFF_RUNNING) 4580 bge_setmulti(sc); 4581 break; 4582 } 4583 4584 splx(s); 4585 4586 return error; 4587 } 4588 4589 static void 4590 bge_watchdog(struct ifnet *ifp) 4591 { 4592 struct bge_softc *sc; 4593 4594 sc = ifp->if_softc; 4595 4596 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n"); 4597 4598 ifp->if_flags &= ~IFF_RUNNING; 4599 bge_init(ifp); 4600 4601 ifp->if_oerrors++; 4602 } 4603 4604 static void 4605 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 4606 { 4607 int i; 4608 4609 BGE_CLRBIT(sc, reg, bit); 4610 4611 for (i = 0; i < 1000; i++) { 4612 if ((CSR_READ_4(sc, reg) & bit) == 0) 4613 return; 4614 delay(100); 4615 } 4616 4617 /* 4618 * Doesn't print only when the register is BGE_SRS_MODE. It occurs 4619 * on some environment (and once after boot?) 4620 */ 4621 if (reg != BGE_SRS_MODE) 4622 aprint_error_dev(sc->bge_dev, 4623 "block failed to stop: reg 0x%lx, bit 0x%08x\n", 4624 (u_long)reg, bit); 4625 } 4626 4627 /* 4628 * Stop the adapter and free any mbufs allocated to the 4629 * RX and TX lists. 4630 */ 4631 static void 4632 bge_stop(struct ifnet *ifp, int disable) 4633 { 4634 struct bge_softc *sc = ifp->if_softc; 4635 4636 callout_stop(&sc->bge_timeout); 4637 4638 /* 4639 * Tell firmware we're shutting down. 4640 */ 4641 bge_stop_fw(sc); 4642 bge_sig_pre_reset(sc, BGE_RESET_STOP); 4643 4644 /* Disable host interrupts. */ 4645 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4646 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 4647 4648 /* 4649 * Disable all of the receiver blocks 4650 */ 4651 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4652 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4653 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4654 if (BGE_IS_5700_FAMILY(sc)) 4655 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4656 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4657 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4658 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4659 4660 /* 4661 * Disable all of the transmit blocks 4662 */ 4663 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4664 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4665 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4666 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4667 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4668 if (BGE_IS_5700_FAMILY(sc)) 4669 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4670 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4671 4672 /* 4673 * Shut down all of the memory managers and related 4674 * state machines. 
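 * (host coalescing and write-DMA engines, the mbuf-cluster-free block on
 * 5700-family parts, an FTQ reset, and, again only on the 5700 family,
 * the buffer manager and memory arbiter).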
4675 */ 4676 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4677 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4678 if (BGE_IS_5700_FAMILY(sc)) 4679 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4680 4681 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4682 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4683 4684 if (BGE_IS_5700_FAMILY(sc)) { 4685 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4686 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4687 } 4688 4689 bge_reset(sc); 4690 bge_sig_legacy(sc, BGE_RESET_STOP); 4691 bge_sig_post_reset(sc, BGE_RESET_STOP); 4692 4693 /* 4694 * Keep the ASF firmware running if up. 4695 */ 4696 if (sc->bge_asf_mode & ASF_STACKUP) 4697 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4698 else 4699 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4700 4701 /* Free the RX lists. */ 4702 bge_free_rx_ring_std(sc); 4703 4704 /* Free jumbo RX list. */ 4705 if (BGE_IS_JUMBO_CAPABLE(sc)) 4706 bge_free_rx_ring_jumbo(sc); 4707 4708 /* Free TX buffers. */ 4709 bge_free_tx_ring(sc); 4710 4711 /* 4712 * Isolate/power down the PHY. 4713 */ 4714 if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) 4715 mii_down(&sc->bge_mii); 4716 4717 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4718 4719 /* Clear MAC's link state (PHY may still have link UP). */ 4720 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4721 4722 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4723 } 4724 4725 static void 4726 bge_link_upd(struct bge_softc *sc) 4727 { 4728 struct ifnet *ifp = &sc->ethercom.ec_if; 4729 struct mii_data *mii = &sc->bge_mii; 4730 uint32_t status; 4731 int link; 4732 4733 /* Clear 'pending link event' flag */ 4734 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 4735 4736 /* 4737 * Process link state changes. 4738 * Grrr. The link status word in the status block does 4739 * not work correctly on the BCM5700 rev AX and BX chips, 4740 * according to all available information. Hence, we have 4741 * to enable MII interrupts in order to properly obtain 4742 * async link changes. Unfortunately, this also means that 4743 * we have to read the MAC status register to detect link 4744 * changes, thereby adding an additional register access to 4745 * the interrupt handler. 
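 * Below, three cases are handled: BCM5700 relies on MII interrupts plus a
 * MAC-status read; TBI (fibre) cards watch BGE_MACSTAT_TBI_PCS_SYNCHED;
 * and copper PHYs with auto-polling enabled read BGE_MI_STS directly,
 * since some chips assert BGE_STATFLAG_LINKSTATE_CHANGED spuriously.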
4746 */ 4747 4748 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 4749 status = CSR_READ_4(sc, BGE_MAC_STS); 4750 if (status & BGE_MACSTAT_MI_INTERRUPT) { 4751 mii_pollstat(mii); 4752 4753 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4754 mii->mii_media_status & IFM_ACTIVE && 4755 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4756 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4757 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4758 (!(mii->mii_media_status & IFM_ACTIVE) || 4759 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4760 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4761 4762 /* Clear the interrupt */ 4763 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4764 BGE_EVTENB_MI_INTERRUPT); 4765 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4766 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 4767 BRGPHY_INTRS); 4768 } 4769 return; 4770 } 4771 4772 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4773 status = CSR_READ_4(sc, BGE_MAC_STS); 4774 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4775 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 4776 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4777 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 4778 BGE_CLRBIT(sc, BGE_MAC_MODE, 4779 BGE_MACMODE_TBI_SEND_CFGS); 4780 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4781 if_link_state_change(ifp, LINK_STATE_UP); 4782 } 4783 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 4784 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4785 if_link_state_change(ifp, LINK_STATE_DOWN); 4786 } 4787 /* 4788 * Discard link events for MII/GMII cards if MI auto-polling disabled. 4789 * This should not happen since mii callouts are locked now, but 4790 * we keep this check for debug. 4791 */ 4792 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 4793 /* 4794 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 4795 * bit in status word always set. Workaround this bug by 4796 * reading PHY link status directly. 4797 */ 4798 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 4799 BGE_STS_LINK : 0; 4800 4801 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 4802 mii_pollstat(mii); 4803 4804 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4805 mii->mii_media_status & IFM_ACTIVE && 4806 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4807 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4808 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4809 (!(mii->mii_media_status & IFM_ACTIVE) || 4810 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4811 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4812 } 4813 } 4814 4815 /* Clear the attention */ 4816 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 4817 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 4818 BGE_MACSTAT_LINK_CHANGED); 4819 } 4820 4821 static int 4822 sysctl_bge_verify(SYSCTLFN_ARGS) 4823 { 4824 int error, t; 4825 struct sysctlnode node; 4826 4827 node = *rnode; 4828 t = *(int*)rnode->sysctl_data; 4829 node.sysctl_data = &t; 4830 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 4831 if (error || newp == NULL) 4832 return error; 4833 4834 #if 0 4835 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 4836 node.sysctl_num, rnode->sysctl_num)); 4837 #endif 4838 4839 if (node.sysctl_num == bge_rxthresh_nodenum) { 4840 if (t < 0 || t >= NBGE_RX_THRESH) 4841 return EINVAL; 4842 bge_update_all_threshes(t); 4843 } else 4844 return EINVAL; 4845 4846 *(int*)rnode->sysctl_data = t; 4847 4848 return 0; 4849 } 4850 4851 /* 4852 * Set up sysctl(3) MIB, hw.bge.*. 
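 * At present this exposes a single read/write knob, hw.bge.rx_lvl, which
 * selects the receive interrupt mitigation level; sysctl_bge_verify()
 * rejects values outside [0, NBGE_RX_THRESH) and applies accepted ones via
 * bge_update_all_threshes().  A hypothetical usage example:
 *
 *	sysctl -w hw.bge.rx_lvl=3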
4853 */ 4854 static void 4855 sysctl_bge_init(struct bge_softc *sc) 4856 { 4857 int rc, bge_root_num; 4858 const struct sysctlnode *node; 4859 4860 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, NULL, 4861 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 4862 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 4863 goto err; 4864 } 4865 4866 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 4867 0, CTLTYPE_NODE, "bge", 4868 SYSCTL_DESCR("BGE interface controls"), 4869 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 4870 goto err; 4871 } 4872 4873 bge_root_num = node->sysctl_num; 4874 4875 /* BGE Rx interrupt mitigation level */ 4876 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 4877 CTLFLAG_READWRITE, 4878 CTLTYPE_INT, "rx_lvl", 4879 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 4880 sysctl_bge_verify, 0, 4881 &bge_rx_thresh_lvl, 4882 0, CTL_HW, bge_root_num, CTL_CREATE, 4883 CTL_EOL)) != 0) { 4884 goto err; 4885 } 4886 4887 bge_rxthresh_nodenum = node->sysctl_num; 4888 4889 return; 4890 4891 err: 4892 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4893 } 4894 4895 #ifdef BGE_DEBUG 4896 void 4897 bge_debug_info(struct bge_softc *sc) 4898 { 4899 4900 printf("Hardware Flags:\n"); 4901 if (BGE_IS_5755_PLUS(sc)) 4902 printf(" - 5755 Plus\n"); 4903 if (BGE_IS_5750_OR_BEYOND(sc)) 4904 printf(" - 5750 Plus\n"); 4905 if (BGE_IS_5705_PLUS(sc)) 4906 printf(" - 5705 Plus\n"); 4907 if (BGE_IS_5714_FAMILY(sc)) 4908 printf(" - 5714 Family\n"); 4909 if (BGE_IS_5700_FAMILY(sc)) 4910 printf(" - 5700 Family\n"); 4911 if (sc->bge_flags & BGE_IS_5788) 4912 printf(" - 5788\n"); 4913 if (sc->bge_flags & BGE_JUMBO_CAPABLE) 4914 printf(" - Supports Jumbo Frames\n"); 4915 if (sc->bge_flags & BGE_NO_EEPROM) 4916 printf(" - No EEPROM\n"); 4917 if (sc->bge_flags & BGE_PCIX) 4918 printf(" - PCI-X Bus\n"); 4919 if (sc->bge_flags & BGE_PCIE) 4920 printf(" - PCI Express Bus\n"); 4921 if (sc->bge_flags & BGE_NO_3LED) 4922 printf(" - No 3 LEDs\n"); 4923 if (sc->bge_flags & BGE_RX_ALIGNBUG) 4924 printf(" - RX Alignment Bug\n"); 4925 if (sc->bge_flags & BGE_TSO) 4926 printf(" - TSO\n"); 4927 } 4928 #endif /* BGE_DEBUG */ 4929 4930 static int 4931 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 4932 { 4933 prop_dictionary_t dict; 4934 prop_data_t ea; 4935 4936 if ((sc->bge_flags & BGE_NO_EEPROM) == 0) 4937 return 1; 4938 4939 dict = device_properties(sc->bge_dev); 4940 ea = prop_dictionary_get(dict, "mac-address"); 4941 if (ea != NULL) { 4942 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 4943 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 4944 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 4945 return 0; 4946 } 4947 4948 return 1; 4949 } 4950 4951 static int 4952 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 4953 { 4954 uint32_t mac_addr; 4955 4956 mac_addr = bge_readmem_ind(sc, 0x0c14); 4957 if ((mac_addr >> 16) == 0x484b) { 4958 ether_addr[0] = (uint8_t)(mac_addr >> 8); 4959 ether_addr[1] = (uint8_t)mac_addr; 4960 mac_addr = bge_readmem_ind(sc, 0x0c18); 4961 ether_addr[2] = (uint8_t)(mac_addr >> 24); 4962 ether_addr[3] = (uint8_t)(mac_addr >> 16); 4963 ether_addr[4] = (uint8_t)(mac_addr >> 8); 4964 ether_addr[5] = (uint8_t)mac_addr; 4965 return 0; 4966 } 4967 return 1; 4968 } 4969 4970 static int 4971 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 4972 { 4973 int mac_offset = BGE_EE_MAC_OFFSET; 4974 4975 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 4976 mac_offset = BGE_EE_MAC_OFFSET_5906; 4977 4978 return 
(bge_read_nvram(sc, ether_addr, mac_offset + 2, 4979 ETHER_ADDR_LEN)); 4980 } 4981 4982 static int 4983 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 4984 { 4985 4986 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 4987 return 1; 4988 4989 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 4990 ETHER_ADDR_LEN)); 4991 } 4992 4993 static int 4994 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 4995 { 4996 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 4997 /* NOTE: Order is critical */ 4998 bge_get_eaddr_fw, 4999 bge_get_eaddr_mem, 5000 bge_get_eaddr_nvram, 5001 bge_get_eaddr_eeprom, 5002 NULL 5003 }; 5004 const bge_eaddr_fcn_t *func; 5005 5006 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 5007 if ((*func)(sc, eaddr) == 0) 5008 break; 5009 } 5010 return (*func == NULL ? ENXIO : 0); 5011 } 5012