1 /* $NetBSD: if_bge.c,v 1.180 2010/02/03 15:36:36 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2001 6 * Bill Paul <wpaul@windriver.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $ 36 */ 37 38 /* 39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD. 40 * 41 * NetBSD version by: 42 * 43 * Frank van der Linden <fvdl@wasabisystems.com> 44 * Jason Thorpe <thorpej@wasabisystems.com> 45 * Jonathan Stone <jonathan@dsg.stanford.edu> 46 * 47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com> 48 * Senior Engineer, Wind River Systems 49 */ 50 51 /* 52 * The Broadcom BCM5700 is based on technology originally developed by 53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet 54 * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has 55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external 56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo 57 * frames, highly configurable RX filtering, and 16 RX and TX queues 58 * (which, along with RX filter rules, can be used for QOS applications). 59 * Other features, such as TCP segmentation, may be available as part 60 * of value-added firmware updates. Unlike the Tigon I and Tigon II, 61 * firmware images can be stored in hardware and need not be compiled 62 * into the driver. 63 * 64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus. 66 * 67 * The BCM5701 is a single-chip solution incorporating both the BCM5700 68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 69 * does not support external SSRAM. 70 * 71 * Broadcom also produces a variation of the BCM5700 under the "Altima" 72 * brand name, which is functionally similar but lacks PCI-X support. 73 * 74 * Without external SSRAM, you can only have at most 4 TX rings, 75 * and the use of the mini RX ring is disabled. This seems to imply 76 * that these features are simply not available on the BCM5701. As a 77 * result, this driver does not implement any support for the mini RX 78 * ring. 79 */ 80 81 #include <sys/cdefs.h> 82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.180 2010/02/03 15:36:36 msaitoh Exp $"); 83 84 #include "vlan.h" 85 #include "rnd.h" 86 87 #include <sys/param.h> 88 #include <sys/systm.h> 89 #include <sys/callout.h> 90 #include <sys/sockio.h> 91 #include <sys/mbuf.h> 92 #include <sys/malloc.h> 93 #include <sys/kernel.h> 94 #include <sys/device.h> 95 #include <sys/socket.h> 96 #include <sys/sysctl.h> 97 98 #include <net/if.h> 99 #include <net/if_dl.h> 100 #include <net/if_media.h> 101 #include <net/if_ether.h> 102 103 #if NRND > 0 104 #include <sys/rnd.h> 105 #endif 106 107 #ifdef INET 108 #include <netinet/in.h> 109 #include <netinet/in_systm.h> 110 #include <netinet/in_var.h> 111 #include <netinet/ip.h> 112 #endif 113 114 /* Headers for TCP Segmentation Offload (TSO) */ 115 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */ 116 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */ 117 #include <netinet/ip.h> /* for struct ip */ 118 #include <netinet/tcp.h> /* for struct tcphdr */ 119 120 121 #include <net/bpf.h> 122 123 #include <dev/pci/pcireg.h> 124 #include <dev/pci/pcivar.h> 125 #include <dev/pci/pcidevs.h> 126 127 #include <dev/mii/mii.h> 128 #include <dev/mii/miivar.h> 129 #include <dev/mii/miidevs.h> 130 #include <dev/mii/brgphyreg.h> 131 132 #include <dev/pci/if_bgereg.h> 133 #include <dev/pci/if_bgevar.h> 134 135 #include <uvm/uvm_extern.h> 136 #include <prop/proplib.h> 137 138 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 139 140 141 /* 142 * Tunable thresholds for rx-side bge interrupt mitigation. 143 */ 144 145 /* 146 * The pairs of values below were obtained from empirical measurement 147 * on bcm5700 rev B2; they ar designed to give roughly 1 receive 148 * interrupt for every N packets received, where N is, approximately, 149 * the second value (rx_max_bds) in each pair. The values are chosen 150 * such that moving from one pair to the succeeding pair was observed 151 * to roughly halve interrupt rate under sustained input packet load. 152 * The values were empirically chosen to avoid overflowing internal 153 * limits on the bcm5700: inreasing rx_ticks much beyond 600 154 * results in internal wrapping and higher interrupt rates. 155 * The limit of 46 frames was chosen to match NFS workloads. 156 * 157 * These values also work well on bcm5701, bcm5704C, and (less 158 * tested) bcm5703. On other chipsets, (including the Altima chip 159 * family), the larger values may overflow internal chip limits, 160 * leading to increasing interrupt rates rather than lower interrupt 161 * rates. 162 * 163 * Applications using heavy interrupt mitigation (interrupting every 164 * 32 or 46 frames) in both directions may need to increase the TCP 165 * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain 166 * full link bandwidth, due to ACKs and window updates lingering 167 * in the RX queue during the 30-to-40-frame interrupt-mitigation window. 168 */ 169 static const struct bge_load_rx_thresh { 170 int rx_ticks; 171 int rx_max_bds; } 172 bge_rx_threshes[] = { 173 { 32, 2 }, 174 { 50, 4 }, 175 { 100, 8 }, 176 { 192, 16 }, 177 { 416, 32 }, 178 { 598, 46 } 179 }; 180 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0])) 181 182 /* XXX patchable; should be sysctl'able */ 183 static int bge_auto_thresh = 1; 184 static int bge_rx_thresh_lvl; 185 186 static int bge_rxthresh_nodenum; 187 188 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 189 190 static int bge_probe(device_t, cfdata_t, void *); 191 static void bge_attach(device_t, device_t, void *); 192 static void bge_release_resources(struct bge_softc *); 193 194 static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]); 195 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 196 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 197 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 198 static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 199 200 static void bge_txeof(struct bge_softc *); 201 static void bge_rxeof(struct bge_softc *); 202 203 static void bge_asf_driver_up (struct bge_softc *); 204 static void bge_tick(void *); 205 static void bge_stats_update(struct bge_softc *); 206 static void bge_stats_update_regs(struct bge_softc *); 207 static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *); 208 209 static int bge_intr(void *); 210 static void bge_start(struct ifnet *); 211 static int bge_ioctl(struct ifnet *, u_long, void *); 212 static int bge_init(struct ifnet *); 213 static void bge_stop(struct ifnet *, int); 214 static void bge_watchdog(struct ifnet *); 215 static int bge_ifmedia_upd(struct ifnet *); 216 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 217 218 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 219 static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int); 220 221 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 222 static int bge_read_eeprom(struct bge_softc *, void *, int, int); 223 static void bge_setmulti(struct bge_softc *); 224 225 static void bge_handle_events(struct bge_softc *); 226 static int bge_alloc_jumbo_mem(struct bge_softc *); 227 #if 0 /* XXX */ 228 static void bge_free_jumbo_mem(struct bge_softc *); 229 #endif 230 static void *bge_jalloc(struct bge_softc *); 231 static void bge_jfree(struct mbuf *, void *, size_t, void *); 232 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, 233 bus_dmamap_t); 234 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); 235 static int bge_init_rx_ring_std(struct bge_softc *); 236 static void bge_free_rx_ring_std(struct bge_softc *); 237 static int bge_init_rx_ring_jumbo(struct bge_softc *); 238 static void bge_free_rx_ring_jumbo(struct bge_softc *); 239 static void bge_free_tx_ring(struct bge_softc *); 240 static int bge_init_tx_ring(struct bge_softc *); 241 242 static int bge_chipinit(struct bge_softc *); 243 static int bge_blockinit(struct bge_softc *); 244 static int bge_setpowerstate(struct bge_softc *, int); 245 static uint32_t bge_readmem_ind(struct bge_softc *, int); 246 static void bge_writemem_ind(struct bge_softc *, int, int); 247 static void bge_writembx(struct bge_softc *, int, int); 248 static void bge_writemem_direct(struct bge_softc *, int, int); 249 static void bge_writereg_ind(struct bge_softc *, int, int); 250 static void bge_set_max_readrq(struct bge_softc *); 251 252 static int bge_miibus_readreg(device_t, int, int); 253 static void bge_miibus_writereg(device_t, int, int, int); 254 static void bge_miibus_statchg(device_t); 255 256 #define BGE_RESET_START 1 257 #define BGE_RESET_STOP 2 258 static void bge_sig_post_reset(struct bge_softc *, int); 259 static void bge_sig_legacy(struct bge_softc *, int); 260 static void bge_sig_pre_reset(struct bge_softc *, int); 261 static void bge_stop_fw(struct bge_softc *); 262 static int bge_reset(struct bge_softc *); 263 static void bge_link_upd(struct bge_softc *); 264 265 #ifdef BGE_DEBUG 266 #define DPRINTF(x) if (bgedebug) printf x 267 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x 268 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0) 269 int bgedebug = 0; 270 int bge_tso_debug = 0; 271 void bge_debug_info(struct bge_softc *); 272 #else 273 #define DPRINTF(x) 274 #define DPRINTFN(n,x) 275 #define BGE_TSO_PRINTF(x) 276 #endif 277 278 #ifdef BGE_EVENT_COUNTERS 279 #define BGE_EVCNT_INCR(ev) (ev).ev_count++ 280 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val) 281 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val) 282 #else 283 #define BGE_EVCNT_INCR(ev) /* nothing */ 284 #define BGE_EVCNT_ADD(ev, val) /* nothing */ 285 #define BGE_EVCNT_UPD(ev, val) /* nothing */ 286 #endif 287 288 static const struct bge_product { 289 pci_vendor_id_t bp_vendor; 290 pci_product_id_t bp_product; 291 const char *bp_name; 292 } bge_products[] = { 293 /* 294 * The BCM5700 documentation seems to indicate that the hardware 295 * still has the Alteon vendor ID burned into it, though it 296 * should always be overridden by the value in the EEPROM. We'll 297 * check for it anyway. 298 */ 299 { PCI_VENDOR_ALTEON, 300 PCI_PRODUCT_ALTEON_BCM5700, 301 "Broadcom BCM5700 Gigabit Ethernet", 302 }, 303 { PCI_VENDOR_ALTEON, 304 PCI_PRODUCT_ALTEON_BCM5701, 305 "Broadcom BCM5701 Gigabit Ethernet", 306 }, 307 { PCI_VENDOR_ALTIMA, 308 PCI_PRODUCT_ALTIMA_AC1000, 309 "Altima AC1000 Gigabit Ethernet", 310 }, 311 { PCI_VENDOR_ALTIMA, 312 PCI_PRODUCT_ALTIMA_AC1001, 313 "Altima AC1001 Gigabit Ethernet", 314 }, 315 { PCI_VENDOR_ALTIMA, 316 PCI_PRODUCT_ALTIMA_AC9100, 317 "Altima AC9100 Gigabit Ethernet", 318 }, 319 { PCI_VENDOR_BROADCOM, 320 PCI_PRODUCT_BROADCOM_BCM5700, 321 "Broadcom BCM5700 Gigabit Ethernet", 322 }, 323 { PCI_VENDOR_BROADCOM, 324 PCI_PRODUCT_BROADCOM_BCM5701, 325 "Broadcom BCM5701 Gigabit Ethernet", 326 }, 327 { PCI_VENDOR_BROADCOM, 328 PCI_PRODUCT_BROADCOM_BCM5702, 329 "Broadcom BCM5702 Gigabit Ethernet", 330 }, 331 { PCI_VENDOR_BROADCOM, 332 PCI_PRODUCT_BROADCOM_BCM5702X, 333 "Broadcom BCM5702X Gigabit Ethernet" }, 334 { PCI_VENDOR_BROADCOM, 335 PCI_PRODUCT_BROADCOM_BCM5703, 336 "Broadcom BCM5703 Gigabit Ethernet", 337 }, 338 { PCI_VENDOR_BROADCOM, 339 PCI_PRODUCT_BROADCOM_BCM5703X, 340 "Broadcom BCM5703X Gigabit Ethernet", 341 }, 342 { PCI_VENDOR_BROADCOM, 343 PCI_PRODUCT_BROADCOM_BCM5703_ALT, 344 "Broadcom BCM5703 Gigabit Ethernet", 345 }, 346 { PCI_VENDOR_BROADCOM, 347 PCI_PRODUCT_BROADCOM_BCM5704C, 348 "Broadcom BCM5704C Dual Gigabit Ethernet", 349 }, 350 { PCI_VENDOR_BROADCOM, 351 PCI_PRODUCT_BROADCOM_BCM5704S, 352 "Broadcom BCM5704S Dual Gigabit Ethernet", 353 }, 354 { PCI_VENDOR_BROADCOM, 355 PCI_PRODUCT_BROADCOM_BCM5705, 356 "Broadcom BCM5705 Gigabit Ethernet", 357 }, 358 { PCI_VENDOR_BROADCOM, 359 PCI_PRODUCT_BROADCOM_BCM5705F, 360 "Broadcom BCM5705F Gigabit Ethernet", 361 }, 362 { PCI_VENDOR_BROADCOM, 363 PCI_PRODUCT_BROADCOM_BCM5705K, 364 "Broadcom BCM5705K Gigabit Ethernet", 365 }, 366 { PCI_VENDOR_BROADCOM, 367 PCI_PRODUCT_BROADCOM_BCM5705M, 368 "Broadcom BCM5705M Gigabit Ethernet", 369 }, 370 { PCI_VENDOR_BROADCOM, 371 PCI_PRODUCT_BROADCOM_BCM5705M_ALT, 372 "Broadcom BCM5705M Gigabit Ethernet", 373 }, 374 { PCI_VENDOR_BROADCOM, 375 PCI_PRODUCT_BROADCOM_BCM5714, 376 "Broadcom BCM5714 Gigabit Ethernet", 377 }, 378 { PCI_VENDOR_BROADCOM, 379 PCI_PRODUCT_BROADCOM_BCM5714S, 380 "Broadcom BCM5714S Gigabit Ethernet", 381 }, 382 { PCI_VENDOR_BROADCOM, 383 PCI_PRODUCT_BROADCOM_BCM5715, 384 "Broadcom BCM5715 Gigabit Ethernet", 385 }, 386 { PCI_VENDOR_BROADCOM, 387 PCI_PRODUCT_BROADCOM_BCM5715S, 388 "Broadcom BCM5715S Gigabit Ethernet", 389 }, 390 { PCI_VENDOR_BROADCOM, 391 PCI_PRODUCT_BROADCOM_BCM5717, 392 "Broadcom BCM5717 Gigabit Ethernet", 393 }, 394 { PCI_VENDOR_BROADCOM, 395 PCI_PRODUCT_BROADCOM_BCM5718, 396 "Broadcom BCM5718 Gigabit Ethernet", 397 }, 398 { PCI_VENDOR_BROADCOM, 399 PCI_PRODUCT_BROADCOM_BCM5720, 400 "Broadcom BCM5720 Gigabit Ethernet", 401 }, 402 { PCI_VENDOR_BROADCOM, 403 PCI_PRODUCT_BROADCOM_BCM5721, 404 "Broadcom BCM5721 Gigabit Ethernet", 405 }, 406 { PCI_VENDOR_BROADCOM, 407 PCI_PRODUCT_BROADCOM_BCM5722, 408 "Broadcom BCM5722 Gigabit Ethernet", 409 }, 410 { PCI_VENDOR_BROADCOM, 411 PCI_PRODUCT_BROADCOM_BCM5723, 412 "Broadcom BCM5723 Gigabit Ethernet", 413 }, 414 { PCI_VENDOR_BROADCOM, 415 PCI_PRODUCT_BROADCOM_BCM5724, 416 "Broadcom BCM5724 Gigabit Ethernet", 417 }, 418 { PCI_VENDOR_BROADCOM, 419 PCI_PRODUCT_BROADCOM_BCM5750, 420 "Broadcom BCM5750 Gigabit Ethernet", 421 }, 422 { PCI_VENDOR_BROADCOM, 423 PCI_PRODUCT_BROADCOM_BCM5750M, 424 "Broadcom BCM5750M Gigabit Ethernet", 425 }, 426 { PCI_VENDOR_BROADCOM, 427 PCI_PRODUCT_BROADCOM_BCM5751, 428 "Broadcom BCM5751 Gigabit Ethernet", 429 }, 430 { PCI_VENDOR_BROADCOM, 431 PCI_PRODUCT_BROADCOM_BCM5751F, 432 "Broadcom BCM5751F Gigabit Ethernet", 433 }, 434 { PCI_VENDOR_BROADCOM, 435 PCI_PRODUCT_BROADCOM_BCM5751M, 436 "Broadcom BCM5751M Gigabit Ethernet", 437 }, 438 { PCI_VENDOR_BROADCOM, 439 PCI_PRODUCT_BROADCOM_BCM5752, 440 "Broadcom BCM5752 Gigabit Ethernet", 441 }, 442 { PCI_VENDOR_BROADCOM, 443 PCI_PRODUCT_BROADCOM_BCM5752M, 444 "Broadcom BCM5752M Gigabit Ethernet", 445 }, 446 { PCI_VENDOR_BROADCOM, 447 PCI_PRODUCT_BROADCOM_BCM5753, 448 "Broadcom BCM5753 Gigabit Ethernet", 449 }, 450 { PCI_VENDOR_BROADCOM, 451 PCI_PRODUCT_BROADCOM_BCM5753F, 452 "Broadcom BCM5753F Gigabit Ethernet", 453 }, 454 { PCI_VENDOR_BROADCOM, 455 PCI_PRODUCT_BROADCOM_BCM5753M, 456 "Broadcom BCM5753M Gigabit Ethernet", 457 }, 458 { PCI_VENDOR_BROADCOM, 459 PCI_PRODUCT_BROADCOM_BCM5754, 460 "Broadcom BCM5754 Gigabit Ethernet", 461 }, 462 { PCI_VENDOR_BROADCOM, 463 PCI_PRODUCT_BROADCOM_BCM5754M, 464 "Broadcom BCM5754M Gigabit Ethernet", 465 }, 466 { PCI_VENDOR_BROADCOM, 467 PCI_PRODUCT_BROADCOM_BCM5755, 468 "Broadcom BCM5755 Gigabit Ethernet", 469 }, 470 { PCI_VENDOR_BROADCOM, 471 PCI_PRODUCT_BROADCOM_BCM5755M, 472 "Broadcom BCM5755M Gigabit Ethernet", 473 }, 474 { PCI_VENDOR_BROADCOM, 475 PCI_PRODUCT_BROADCOM_BCM5756, 476 "Broadcom BCM5756 Gigabit Ethernet", 477 }, 478 { PCI_VENDOR_BROADCOM, 479 PCI_PRODUCT_BROADCOM_BCM5761, 480 "Broadcom BCM5761 Gigabit Ethernet", 481 }, 482 { PCI_VENDOR_BROADCOM, 483 PCI_PRODUCT_BROADCOM_BCM5761E, 484 "Broadcom BCM5761E Gigabit Ethernet", 485 }, 486 { PCI_VENDOR_BROADCOM, 487 PCI_PRODUCT_BROADCOM_BCM5761S, 488 "Broadcom BCM5761S Gigabit Ethernet", 489 }, 490 { PCI_VENDOR_BROADCOM, 491 PCI_PRODUCT_BROADCOM_BCM5761SE, 492 "Broadcom BCM5761SE Gigabit Ethernet", 493 }, 494 { PCI_VENDOR_BROADCOM, 495 PCI_PRODUCT_BROADCOM_BCM5764, 496 "Broadcom BCM5764 Gigabit Ethernet", 497 }, 498 { PCI_VENDOR_BROADCOM, 499 PCI_PRODUCT_BROADCOM_BCM5780, 500 "Broadcom BCM5780 Gigabit Ethernet", 501 }, 502 { PCI_VENDOR_BROADCOM, 503 PCI_PRODUCT_BROADCOM_BCM5780S, 504 "Broadcom BCM5780S Gigabit Ethernet", 505 }, 506 { PCI_VENDOR_BROADCOM, 507 PCI_PRODUCT_BROADCOM_BCM5781, 508 "Broadcom BCM5781 Gigabit Ethernet", 509 }, 510 { PCI_VENDOR_BROADCOM, 511 PCI_PRODUCT_BROADCOM_BCM5782, 512 "Broadcom BCM5782 Gigabit Ethernet", 513 }, 514 { PCI_VENDOR_BROADCOM, 515 PCI_PRODUCT_BROADCOM_BCM5784M, 516 "BCM5784M NetLink 1000baseT Ethernet", 517 }, 518 { PCI_VENDOR_BROADCOM, 519 PCI_PRODUCT_BROADCOM_BCM5786, 520 "Broadcom BCM5786 Gigabit Ethernet", 521 }, 522 { PCI_VENDOR_BROADCOM, 523 PCI_PRODUCT_BROADCOM_BCM5787, 524 "Broadcom BCM5787 Gigabit Ethernet", 525 }, 526 { PCI_VENDOR_BROADCOM, 527 PCI_PRODUCT_BROADCOM_BCM5787M, 528 "Broadcom BCM5787M Gigabit Ethernet", 529 }, 530 { PCI_VENDOR_BROADCOM, 531 PCI_PRODUCT_BROADCOM_BCM5788, 532 "Broadcom BCM5788 Gigabit Ethernet", 533 }, 534 { PCI_VENDOR_BROADCOM, 535 PCI_PRODUCT_BROADCOM_BCM5789, 536 "Broadcom BCM5789 Gigabit Ethernet", 537 }, 538 { PCI_VENDOR_BROADCOM, 539 PCI_PRODUCT_BROADCOM_BCM5901, 540 "Broadcom BCM5901 Fast Ethernet", 541 }, 542 { PCI_VENDOR_BROADCOM, 543 PCI_PRODUCT_BROADCOM_BCM5901A2, 544 "Broadcom BCM5901A2 Fast Ethernet", 545 }, 546 { PCI_VENDOR_BROADCOM, 547 PCI_PRODUCT_BROADCOM_BCM5903M, 548 "Broadcom BCM5903M Fast Ethernet", 549 }, 550 { PCI_VENDOR_BROADCOM, 551 PCI_PRODUCT_BROADCOM_BCM5906, 552 "Broadcom BCM5906 Fast Ethernet", 553 }, 554 { PCI_VENDOR_BROADCOM, 555 PCI_PRODUCT_BROADCOM_BCM5906M, 556 "Broadcom BCM5906M Fast Ethernet", 557 }, 558 { PCI_VENDOR_BROADCOM, 559 PCI_PRODUCT_BROADCOM_BCM57760, 560 "Broadcom BCM57760 Fast Ethernet", 561 }, 562 { PCI_VENDOR_BROADCOM, 563 PCI_PRODUCT_BROADCOM_BCM57761, 564 "Broadcom BCM57761 Fast Ethernet", 565 }, 566 { PCI_VENDOR_BROADCOM, 567 PCI_PRODUCT_BROADCOM_BCM57765, 568 "Broadcom BCM57765 Fast Ethernet", 569 }, 570 { PCI_VENDOR_BROADCOM, 571 PCI_PRODUCT_BROADCOM_BCM57780, 572 "Broadcom BCM57780 Fast Ethernet", 573 }, 574 { PCI_VENDOR_BROADCOM, 575 PCI_PRODUCT_BROADCOM_BCM57781, 576 "Broadcom BCM57781 Fast Ethernet", 577 }, 578 { PCI_VENDOR_BROADCOM, 579 PCI_PRODUCT_BROADCOM_BCM57785, 580 "Broadcom BCM57785 Fast Ethernet", 581 }, 582 { PCI_VENDOR_BROADCOM, 583 PCI_PRODUCT_BROADCOM_BCM57788, 584 "Broadcom BCM57788 Fast Ethernet", 585 }, 586 { PCI_VENDOR_BROADCOM, 587 PCI_PRODUCT_BROADCOM_BCM57790, 588 "Broadcom BCM57790 Fast Ethernet", 589 }, 590 { PCI_VENDOR_BROADCOM, 591 PCI_PRODUCT_BROADCOM_BCM57791, 592 "Broadcom BCM57791 Fast Ethernet", 593 }, 594 { PCI_VENDOR_BROADCOM, 595 PCI_PRODUCT_BROADCOM_BCM57795, 596 "Broadcom BCM57795 Fast Ethernet", 597 }, 598 { PCI_VENDOR_SCHNEIDERKOCH, 599 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 600 "SysKonnect SK-9Dx1 Gigabit Ethernet", 601 }, 602 { PCI_VENDOR_3COM, 603 PCI_PRODUCT_3COM_3C996, 604 "3Com 3c996 Gigabit Ethernet", 605 }, 606 { 0, 607 0, 608 NULL }, 609 }; 610 611 /* 612 * XXX: how to handle variants based on 5750 and derivatives: 613 * 5750 5751, 5721, possibly 5714, 5752, and 5708?, which 614 * in general behave like a 5705, except with additional quirks. 615 * This driver's current handling of the 5721 is wrong; 616 * how we map ASIC revision to "quirks" needs more thought. 617 * (defined here until the thought is done). 618 */ 619 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_5700_FAMILY) 620 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_5714_FAMILY) 621 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_5705_PLUS) 622 #define BGE_IS_5750_OR_BEYOND(sc) ((sc)->bge_flags & BGE_5750_PLUS) 623 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_5755_PLUS) 624 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_JUMBO_CAPABLE) 625 626 static const struct bge_revision { 627 uint32_t br_chipid; 628 const char *br_name; 629 } bge_revisions[] = { 630 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 631 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 632 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 633 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 634 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 635 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 636 /* This is treated like a BCM5700 Bx */ 637 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 638 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 639 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 640 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 641 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 642 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 643 { BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" }, 644 { BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" }, 645 { BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" }, 646 { BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" }, 647 { BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" }, 648 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 649 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 650 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 651 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 652 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 653 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 654 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 655 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 656 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 657 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 658 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 659 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 660 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 661 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 662 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 663 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 664 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 665 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 666 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 667 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 668 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 669 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 670 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 671 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 672 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 673 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 674 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 675 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 676 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 677 { BGE_CHIPID_BCM5755_C0, "BCM5755 C0" }, 678 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 679 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 680 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 681 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 682 /* 5754 and 5787 share the same ASIC ID */ 683 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 684 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 685 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 686 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 687 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 688 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 689 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 690 691 { 0, NULL } 692 }; 693 694 /* 695 * Some defaults for major revisions, so that newer steppings 696 * that we don't know about have a shot at working. 697 */ 698 static const struct bge_revision bge_majorrevs[] = { 699 { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 700 { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 701 { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 702 { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 703 { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 704 { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 705 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 706 { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 707 { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 708 { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 709 { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 710 { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 711 { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 712 { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 713 /* 5754 and 5787 share the same ASIC ID */ 714 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 715 { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 716 { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 717 { BGE_ASICREV_BCM5717, "unknown BCM5717" }, 718 { BGE_ASICREV_BCM57765, "unknown BCM57765" }, 719 720 { 0, NULL } 721 }; 722 723 static int bge_allow_asf = 1; 724 725 CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc), 726 bge_probe, bge_attach, NULL, NULL); 727 728 static uint32_t 729 bge_readmem_ind(struct bge_softc *sc, int off) 730 { 731 pcireg_t val; 732 733 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 734 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA); 735 return val; 736 } 737 738 static void 739 bge_writemem_ind(struct bge_softc *sc, int off, int val) 740 { 741 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off); 742 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val); 743 } 744 745 /* 746 * PCI Express only 747 */ 748 static void 749 bge_set_max_readrq(struct bge_softc *sc) 750 { 751 device_t dev; 752 pcireg_t val; 753 754 dev = sc->bge_dev; 755 756 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap 757 + PCI_PCIE_DCSR); 758 if ((val & PCI_PCIE_DCSR_MAX_READ_REQ) != 759 BGE_PCIE_DEVCTL_MAX_READRQ_4096) { 760 printf("adjust device control 0x%04x ", 761 val); 762 val &= ~PCI_PCIE_DCSR_MAX_READ_REQ; 763 val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; 764 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap 765 + PCI_PCIE_DCSR, val); 766 printf("-> 0x%04x\n", val); 767 } 768 } 769 770 #ifdef notdef 771 static uint32_t 772 bge_readreg_ind(struct bge_softc *sc, int off) 773 { 774 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 775 return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA)); 776 } 777 #endif 778 779 static void 780 bge_writereg_ind(struct bge_softc *sc, int off, int val) 781 { 782 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off); 783 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val); 784 } 785 786 static void 787 bge_writemem_direct(struct bge_softc *sc, int off, int val) 788 { 789 CSR_WRITE_4(sc, off, val); 790 } 791 792 static void 793 bge_writembx(struct bge_softc *sc, int off, int val) 794 { 795 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 796 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 797 798 CSR_WRITE_4(sc, off, val); 799 } 800 801 static uint8_t 802 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 803 { 804 uint32_t access, byte = 0; 805 int i; 806 807 /* Lock. */ 808 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 809 for (i = 0; i < 8000; i++) { 810 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 811 break; 812 DELAY(20); 813 } 814 if (i == 8000) 815 return 1; 816 817 /* Enable access. */ 818 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 819 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 820 821 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 822 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 823 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 824 DELAY(10); 825 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 826 DELAY(10); 827 break; 828 } 829 } 830 831 if (i == BGE_TIMEOUT * 10) { 832 aprint_error_dev(sc->bge_dev, "nvram read timed out\n"); 833 return 1; 834 } 835 836 /* Get result. */ 837 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 838 839 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 840 841 /* Disable access. */ 842 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 843 844 /* Unlock. */ 845 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 846 CSR_READ_4(sc, BGE_NVRAM_SWARB); 847 848 return 0; 849 } 850 851 /* 852 * Read a sequence of bytes from NVRAM. 853 */ 854 static int 855 bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt) 856 { 857 int err = 0, i; 858 uint8_t byte = 0; 859 860 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 861 return 1; 862 863 for (i = 0; i < cnt; i++) { 864 err = bge_nvram_getbyte(sc, off + i, &byte); 865 if (err) 866 break; 867 *(dest + i) = byte; 868 } 869 870 return (err ? 1 : 0); 871 } 872 873 /* 874 * Read a byte of data stored in the EEPROM at address 'addr.' The 875 * BCM570x supports both the traditional bitbang interface and an 876 * auto access interface for reading the EEPROM. We use the auto 877 * access method. 878 */ 879 static uint8_t 880 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 881 { 882 int i; 883 uint32_t byte = 0; 884 885 /* 886 * Enable use of auto EEPROM access so we can avoid 887 * having to use the bitbang method. 888 */ 889 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 890 891 /* Reset the EEPROM, load the clock period. */ 892 CSR_WRITE_4(sc, BGE_EE_ADDR, 893 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 894 DELAY(20); 895 896 /* Issue the read EEPROM command. */ 897 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 898 899 /* Wait for completion */ 900 for (i = 0; i < BGE_TIMEOUT * 10; i++) { 901 DELAY(10); 902 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 903 break; 904 } 905 906 if (i == BGE_TIMEOUT * 10) { 907 aprint_error_dev(sc->bge_dev, "eeprom read timed out\n"); 908 return 1; 909 } 910 911 /* Get result. */ 912 byte = CSR_READ_4(sc, BGE_EE_DATA); 913 914 *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 915 916 return 0; 917 } 918 919 /* 920 * Read a sequence of bytes from the EEPROM. 921 */ 922 static int 923 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt) 924 { 925 int err = 0, i; 926 uint8_t byte = 0; 927 char *dest = destv; 928 929 for (i = 0; i < cnt; i++) { 930 err = bge_eeprom_getbyte(sc, off + i, &byte); 931 if (err) 932 break; 933 *(dest + i) = byte; 934 } 935 936 return (err ? 1 : 0); 937 } 938 939 static int 940 bge_miibus_readreg(device_t dev, int phy, int reg) 941 { 942 struct bge_softc *sc = device_private(dev); 943 uint32_t val; 944 uint32_t autopoll; 945 int i; 946 947 /* 948 * Broadcom's own driver always assumes the internal 949 * PHY is at GMII address 1. On some chips, the PHY responds 950 * to accesses at all addresses, which could cause us to 951 * bogusly attach the PHY 32 times at probe type. Always 952 * restricting the lookup to address 1 is simpler than 953 * trying to figure out which chips revisions should be 954 * special-cased. 955 */ 956 if (phy != 1) 957 return 0; 958 959 /* Reading with autopolling on may trigger PCI errors */ 960 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 961 if (autopoll & BGE_MIMODE_AUTOPOLL) { 962 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 963 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 964 DELAY(40); 965 } 966 967 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 968 BGE_MIPHY(phy) | BGE_MIREG(reg)); 969 970 for (i = 0; i < BGE_TIMEOUT; i++) { 971 val = CSR_READ_4(sc, BGE_MI_COMM); 972 if (!(val & BGE_MICOMM_BUSY)) 973 break; 974 delay(10); 975 } 976 977 if (i == BGE_TIMEOUT) { 978 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 979 val = 0; 980 goto done; 981 } 982 983 val = CSR_READ_4(sc, BGE_MI_COMM); 984 985 done: 986 if (autopoll & BGE_MIMODE_AUTOPOLL) { 987 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 988 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 989 DELAY(40); 990 } 991 992 if (val & BGE_MICOMM_READFAIL) 993 return 0; 994 995 return (val & 0xFFFF); 996 } 997 998 static void 999 bge_miibus_writereg(device_t dev, int phy, int reg, int val) 1000 { 1001 struct bge_softc *sc = device_private(dev); 1002 uint32_t autopoll; 1003 int i; 1004 1005 if (phy!=1) { 1006 return; 1007 } 1008 1009 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 && 1010 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) { 1011 return; 1012 } 1013 1014 /* Reading with autopolling on may trigger PCI errors */ 1015 autopoll = CSR_READ_4(sc, BGE_MI_MODE); 1016 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1017 delay(40); 1018 BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL); 1019 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1020 delay(10); /* 40 usec is supposed to be adequate */ 1021 } 1022 1023 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 1024 BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 1025 1026 for (i = 0; i < BGE_TIMEOUT; i++) { 1027 delay(10); 1028 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 1029 delay(5); 1030 CSR_READ_4(sc, BGE_MI_COMM); 1031 break; 1032 } 1033 } 1034 1035 if (autopoll & BGE_MIMODE_AUTOPOLL) { 1036 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 1037 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 1038 delay(40); 1039 } 1040 1041 if (i == BGE_TIMEOUT) 1042 aprint_error_dev(sc->bge_dev, "PHY read timed out\n"); 1043 } 1044 1045 static void 1046 bge_miibus_statchg(device_t dev) 1047 { 1048 struct bge_softc *sc = device_private(dev); 1049 struct mii_data *mii = &sc->bge_mii; 1050 1051 /* 1052 * Get flow control negotiation result. 1053 */ 1054 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1055 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) { 1056 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1057 mii->mii_media_active &= ~IFM_ETH_FMASK; 1058 } 1059 1060 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 1061 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 1062 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 1063 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 1064 else 1065 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 1066 1067 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 1068 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 1069 else 1070 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 1071 1072 /* 1073 * 802.3x flow control 1074 */ 1075 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) 1076 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 1077 else 1078 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE); 1079 1080 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) 1081 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 1082 else 1083 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE); 1084 } 1085 1086 /* 1087 * Update rx threshold levels to values in a particular slot 1088 * of the interrupt-mitigation table bge_rx_threshes. 1089 */ 1090 static void 1091 bge_set_thresh(struct ifnet *ifp, int lvl) 1092 { 1093 struct bge_softc *sc = ifp->if_softc; 1094 int s; 1095 1096 /* For now, just save the new Rx-intr thresholds and record 1097 * that a threshold update is pending. Updating the hardware 1098 * registers here (even at splhigh()) is observed to 1099 * occasionaly cause glitches where Rx-interrupts are not 1100 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05 1101 */ 1102 s = splnet(); 1103 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks; 1104 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds; 1105 sc->bge_pending_rxintr_change = 1; 1106 splx(s); 1107 1108 return; 1109 } 1110 1111 1112 /* 1113 * Update Rx thresholds of all bge devices 1114 */ 1115 static void 1116 bge_update_all_threshes(int lvl) 1117 { 1118 struct ifnet *ifp; 1119 const char * const namebuf = "bge"; 1120 int namelen; 1121 1122 if (lvl < 0) 1123 lvl = 0; 1124 else if (lvl >= NBGE_RX_THRESH) 1125 lvl = NBGE_RX_THRESH - 1; 1126 1127 namelen = strlen(namebuf); 1128 /* 1129 * Now search all the interfaces for this name/number 1130 */ 1131 IFNET_FOREACH(ifp) { 1132 if (strncmp(ifp->if_xname, namebuf, namelen) != 0) 1133 continue; 1134 /* We got a match: update if doing auto-threshold-tuning */ 1135 if (bge_auto_thresh) 1136 bge_set_thresh(ifp, lvl); 1137 } 1138 } 1139 1140 /* 1141 * Handle events that have triggered interrupts. 1142 */ 1143 static void 1144 bge_handle_events(struct bge_softc *sc) 1145 { 1146 1147 return; 1148 } 1149 1150 /* 1151 * Memory management for jumbo frames. 1152 */ 1153 1154 static int 1155 bge_alloc_jumbo_mem(struct bge_softc *sc) 1156 { 1157 char *ptr, *kva; 1158 bus_dma_segment_t seg; 1159 int i, rseg, state, error; 1160 struct bge_jpool_entry *entry; 1161 1162 state = error = 0; 1163 1164 /* Grab a big chunk o' storage. */ 1165 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0, 1166 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1167 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 1168 return ENOBUFS; 1169 } 1170 1171 state = 1; 1172 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva, 1173 BUS_DMA_NOWAIT)) { 1174 aprint_error_dev(sc->bge_dev, 1175 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM); 1176 error = ENOBUFS; 1177 goto out; 1178 } 1179 1180 state = 2; 1181 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0, 1182 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) { 1183 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 1184 error = ENOBUFS; 1185 goto out; 1186 } 1187 1188 state = 3; 1189 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1190 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) { 1191 aprint_error_dev(sc->bge_dev, "can't load DMA map\n"); 1192 error = ENOBUFS; 1193 goto out; 1194 } 1195 1196 state = 4; 1197 sc->bge_cdata.bge_jumbo_buf = (void *)kva; 1198 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf)); 1199 1200 SLIST_INIT(&sc->bge_jfree_listhead); 1201 SLIST_INIT(&sc->bge_jinuse_listhead); 1202 1203 /* 1204 * Now divide it up into 9K pieces and save the addresses 1205 * in an array. 1206 */ 1207 ptr = sc->bge_cdata.bge_jumbo_buf; 1208 for (i = 0; i < BGE_JSLOTS; i++) { 1209 sc->bge_cdata.bge_jslots[i] = ptr; 1210 ptr += BGE_JLEN; 1211 entry = malloc(sizeof(struct bge_jpool_entry), 1212 M_DEVBUF, M_NOWAIT); 1213 if (entry == NULL) { 1214 aprint_error_dev(sc->bge_dev, 1215 "no memory for jumbo buffer queue!\n"); 1216 error = ENOBUFS; 1217 goto out; 1218 } 1219 entry->slot = i; 1220 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, 1221 entry, jpool_entries); 1222 } 1223 out: 1224 if (error != 0) { 1225 switch (state) { 1226 case 4: 1227 bus_dmamap_unload(sc->bge_dmatag, 1228 sc->bge_cdata.bge_rx_jumbo_map); 1229 case 3: 1230 bus_dmamap_destroy(sc->bge_dmatag, 1231 sc->bge_cdata.bge_rx_jumbo_map); 1232 case 2: 1233 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM); 1234 case 1: 1235 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 1236 break; 1237 default: 1238 break; 1239 } 1240 } 1241 1242 return error; 1243 } 1244 1245 /* 1246 * Allocate a jumbo buffer. 1247 */ 1248 static void * 1249 bge_jalloc(struct bge_softc *sc) 1250 { 1251 struct bge_jpool_entry *entry; 1252 1253 entry = SLIST_FIRST(&sc->bge_jfree_listhead); 1254 1255 if (entry == NULL) { 1256 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n"); 1257 return NULL; 1258 } 1259 1260 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); 1261 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); 1262 return (sc->bge_cdata.bge_jslots[entry->slot]); 1263 } 1264 1265 /* 1266 * Release a jumbo buffer. 1267 */ 1268 static void 1269 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 1270 { 1271 struct bge_jpool_entry *entry; 1272 struct bge_softc *sc; 1273 int i, s; 1274 1275 /* Extract the softc struct pointer. */ 1276 sc = (struct bge_softc *)arg; 1277 1278 if (sc == NULL) 1279 panic("bge_jfree: can't find softc pointer!"); 1280 1281 /* calculate the slot this buffer belongs to */ 1282 1283 i = ((char *)buf 1284 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN; 1285 1286 if ((i < 0) || (i >= BGE_JSLOTS)) 1287 panic("bge_jfree: asked to free buffer that we don't manage!"); 1288 1289 s = splvm(); 1290 entry = SLIST_FIRST(&sc->bge_jinuse_listhead); 1291 if (entry == NULL) 1292 panic("bge_jfree: buffer not in use!"); 1293 entry->slot = i; 1294 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); 1295 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); 1296 1297 if (__predict_true(m != NULL)) 1298 pool_cache_put(mb_cache, m); 1299 splx(s); 1300 } 1301 1302 1303 /* 1304 * Intialize a standard receive ring descriptor. 1305 */ 1306 static int 1307 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, 1308 bus_dmamap_t dmamap) 1309 { 1310 struct mbuf *m_new = NULL; 1311 struct bge_rx_bd *r; 1312 int error; 1313 1314 if (dmamap == NULL) { 1315 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1, 1316 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap); 1317 if (error != 0) 1318 return error; 1319 } 1320 1321 sc->bge_cdata.bge_rx_std_map[i] = dmamap; 1322 1323 if (m == NULL) { 1324 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1325 if (m_new == NULL) 1326 return ENOBUFS; 1327 1328 MCLGET(m_new, M_DONTWAIT); 1329 if (!(m_new->m_flags & M_EXT)) { 1330 m_freem(m_new); 1331 return ENOBUFS; 1332 } 1333 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1334 1335 } else { 1336 m_new = m; 1337 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 1338 m_new->m_data = m_new->m_ext.ext_buf; 1339 } 1340 if (!(sc->bge_flags & BGE_RX_ALIGNBUG)) 1341 m_adj(m_new, ETHER_ALIGN); 1342 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new, 1343 BUS_DMA_READ|BUS_DMA_NOWAIT)) 1344 return ENOBUFS; 1345 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 1346 BUS_DMASYNC_PREREAD); 1347 1348 sc->bge_cdata.bge_rx_std_chain[i] = m_new; 1349 r = &sc->bge_rdata->bge_rx_std_ring[i]; 1350 BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr); 1351 r->bge_flags = BGE_RXBDFLAG_END; 1352 r->bge_len = m_new->m_len; 1353 r->bge_idx = i; 1354 1355 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1356 offsetof(struct bge_ring_data, bge_rx_std_ring) + 1357 i * sizeof (struct bge_rx_bd), 1358 sizeof (struct bge_rx_bd), 1359 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1360 1361 return 0; 1362 } 1363 1364 /* 1365 * Initialize a jumbo receive ring descriptor. This allocates 1366 * a jumbo buffer from the pool managed internally by the driver. 1367 */ 1368 static int 1369 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m) 1370 { 1371 struct mbuf *m_new = NULL; 1372 struct bge_rx_bd *r; 1373 void *buf = NULL; 1374 1375 if (m == NULL) { 1376 1377 /* Allocate the mbuf. */ 1378 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1379 if (m_new == NULL) 1380 return ENOBUFS; 1381 1382 /* Allocate the jumbo buffer */ 1383 buf = bge_jalloc(sc); 1384 if (buf == NULL) { 1385 m_freem(m_new); 1386 aprint_error_dev(sc->bge_dev, 1387 "jumbo allocation failed -- packet dropped!\n"); 1388 return ENOBUFS; 1389 } 1390 1391 /* Attach the buffer to the mbuf. */ 1392 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; 1393 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF, 1394 bge_jfree, sc); 1395 m_new->m_flags |= M_EXT_RW; 1396 } else { 1397 m_new = m; 1398 buf = m_new->m_data = m_new->m_ext.ext_buf; 1399 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; 1400 } 1401 if (!(sc->bge_flags & BGE_RX_ALIGNBUG)) 1402 m_adj(m_new, ETHER_ALIGN); 1403 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map, 1404 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN, 1405 BUS_DMASYNC_PREREAD); 1406 /* Set up the descriptor. */ 1407 r = &sc->bge_rdata->bge_rx_jumbo_ring[i]; 1408 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; 1409 BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new)); 1410 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING; 1411 r->bge_len = m_new->m_len; 1412 r->bge_idx = i; 1413 1414 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 1415 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) + 1416 i * sizeof (struct bge_rx_bd), 1417 sizeof (struct bge_rx_bd), 1418 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1419 1420 return 0; 1421 } 1422 1423 /* 1424 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 1425 * that's 1MB or memory, which is a lot. For now, we fill only the first 1426 * 256 ring entries and hope that our CPU is fast enough to keep up with 1427 * the NIC. 1428 */ 1429 static int 1430 bge_init_rx_ring_std(struct bge_softc *sc) 1431 { 1432 int i; 1433 1434 if (sc->bge_flags & BGE_RXRING_VALID) 1435 return 0; 1436 1437 for (i = 0; i < BGE_SSLOTS; i++) { 1438 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 1439 return ENOBUFS; 1440 } 1441 1442 sc->bge_std = i - 1; 1443 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 1444 1445 sc->bge_flags |= BGE_RXRING_VALID; 1446 1447 return 0; 1448 } 1449 1450 static void 1451 bge_free_rx_ring_std(struct bge_softc *sc) 1452 { 1453 int i; 1454 1455 if (!(sc->bge_flags & BGE_RXRING_VALID)) 1456 return; 1457 1458 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1459 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 1460 m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1461 sc->bge_cdata.bge_rx_std_chain[i] = NULL; 1462 bus_dmamap_destroy(sc->bge_dmatag, 1463 sc->bge_cdata.bge_rx_std_map[i]); 1464 } 1465 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0, 1466 sizeof(struct bge_rx_bd)); 1467 } 1468 1469 sc->bge_flags &= ~BGE_RXRING_VALID; 1470 } 1471 1472 static int 1473 bge_init_rx_ring_jumbo(struct bge_softc *sc) 1474 { 1475 int i; 1476 volatile struct bge_rcb *rcb; 1477 1478 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID) 1479 return 0; 1480 1481 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1482 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1483 return ENOBUFS; 1484 }; 1485 1486 sc->bge_jumbo = i - 1; 1487 sc->bge_flags |= BGE_JUMBO_RXRING_VALID; 1488 1489 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 1490 rcb->bge_maxlen_flags = 0; 1491 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 1492 1493 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 1494 1495 return 0; 1496 } 1497 1498 static void 1499 bge_free_rx_ring_jumbo(struct bge_softc *sc) 1500 { 1501 int i; 1502 1503 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID)) 1504 return; 1505 1506 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1507 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1508 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1509 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 1510 } 1511 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0, 1512 sizeof(struct bge_rx_bd)); 1513 } 1514 1515 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID; 1516 } 1517 1518 static void 1519 bge_free_tx_ring(struct bge_softc *sc) 1520 { 1521 int i, freed; 1522 struct txdmamap_pool_entry *dma; 1523 1524 if (!(sc->bge_flags & BGE_TXRING_VALID)) 1525 return; 1526 1527 freed = 0; 1528 1529 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1530 if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 1531 freed++; 1532 m_freem(sc->bge_cdata.bge_tx_chain[i]); 1533 sc->bge_cdata.bge_tx_chain[i] = NULL; 1534 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1535 link); 1536 sc->txdma[i] = 0; 1537 } 1538 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0, 1539 sizeof(struct bge_tx_bd)); 1540 } 1541 1542 while ((dma = SLIST_FIRST(&sc->txdma_list))) { 1543 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 1544 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap); 1545 free(dma, M_DEVBUF); 1546 } 1547 1548 sc->bge_flags &= ~BGE_TXRING_VALID; 1549 } 1550 1551 static int 1552 bge_init_tx_ring(struct bge_softc *sc) 1553 { 1554 int i; 1555 bus_dmamap_t dmamap; 1556 struct txdmamap_pool_entry *dma; 1557 1558 if (sc->bge_flags & BGE_TXRING_VALID) 1559 return 0; 1560 1561 sc->bge_txcnt = 0; 1562 sc->bge_tx_saved_considx = 0; 1563 1564 /* Initialize transmit producer index for host-memory send ring. */ 1565 sc->bge_tx_prodidx = 0; 1566 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1567 /* 5700 b2 errata */ 1568 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1569 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 1570 1571 /* NIC-memory send ring not used; initialize to zero. */ 1572 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1573 /* 5700 b2 errata */ 1574 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 1575 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 1576 1577 SLIST_INIT(&sc->txdma_list); 1578 for (i = 0; i < BGE_RSLOTS; i++) { 1579 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX, 1580 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT, 1581 &dmamap)) 1582 return ENOBUFS; 1583 if (dmamap == NULL) 1584 panic("dmamap NULL in bge_init_tx_ring"); 1585 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1586 if (dma == NULL) { 1587 aprint_error_dev(sc->bge_dev, 1588 "can't alloc txdmamap_pool_entry\n"); 1589 bus_dmamap_destroy(sc->bge_dmatag, dmamap); 1590 return ENOMEM; 1591 } 1592 dma->dmamap = dmamap; 1593 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 1594 } 1595 1596 sc->bge_flags |= BGE_TXRING_VALID; 1597 1598 return 0; 1599 } 1600 1601 static void 1602 bge_setmulti(struct bge_softc *sc) 1603 { 1604 struct ethercom *ac = &sc->ethercom; 1605 struct ifnet *ifp = &ac->ec_if; 1606 struct ether_multi *enm; 1607 struct ether_multistep step; 1608 uint32_t hashes[4] = { 0, 0, 0, 0 }; 1609 uint32_t h; 1610 int i; 1611 1612 if (ifp->if_flags & IFF_PROMISC) 1613 goto allmulti; 1614 1615 /* Now program new ones. */ 1616 ETHER_FIRST_MULTI(step, ac, enm); 1617 while (enm != NULL) { 1618 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1619 /* 1620 * We must listen to a range of multicast addresses. 1621 * For now, just accept all multicasts, rather than 1622 * trying to set only those filter bits needed to match 1623 * the range. (At this time, the only use of address 1624 * ranges is for IP multicast routing, for which the 1625 * range is big enough to require all bits set.) 1626 */ 1627 goto allmulti; 1628 } 1629 1630 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1631 1632 /* Just want the 7 least-significant bits. */ 1633 h &= 0x7f; 1634 1635 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 1636 ETHER_NEXT_MULTI(step, enm); 1637 } 1638 1639 ifp->if_flags &= ~IFF_ALLMULTI; 1640 goto setit; 1641 1642 allmulti: 1643 ifp->if_flags |= IFF_ALLMULTI; 1644 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff; 1645 1646 setit: 1647 for (i = 0; i < 4; i++) 1648 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 1649 } 1650 1651 static void 1652 bge_sig_pre_reset(struct bge_softc *sc, int type) 1653 { 1654 /* 1655 * Some chips don't like this so only do this if ASF is enabled 1656 */ 1657 if (sc->bge_asf_mode) 1658 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 1659 1660 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1661 switch (type) { 1662 case BGE_RESET_START: 1663 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 1664 break; 1665 case BGE_RESET_STOP: 1666 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 1667 break; 1668 } 1669 } 1670 } 1671 1672 static void 1673 bge_sig_post_reset(struct bge_softc *sc, int type) 1674 { 1675 1676 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 1677 switch (type) { 1678 case BGE_RESET_START: 1679 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001); 1680 /* START DONE */ 1681 break; 1682 case BGE_RESET_STOP: 1683 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); 1684 break; 1685 } 1686 } 1687 } 1688 1689 static void 1690 bge_sig_legacy(struct bge_softc *sc, int type) 1691 { 1692 1693 if (sc->bge_asf_mode) { 1694 switch (type) { 1695 case BGE_RESET_START: 1696 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 1697 break; 1698 case BGE_RESET_STOP: 1699 bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 1700 break; 1701 } 1702 } 1703 } 1704 1705 static void 1706 bge_stop_fw(struct bge_softc *sc) 1707 { 1708 int i; 1709 1710 if (sc->bge_asf_mode) { 1711 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); 1712 CSR_WRITE_4(sc, BGE_CPU_EVENT, 1713 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 1714 1715 for (i = 0; i < 100; i++) { 1716 if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) 1717 break; 1718 DELAY(10); 1719 } 1720 } 1721 } 1722 1723 static int 1724 bge_poll_fw(struct bge_softc *sc) 1725 { 1726 uint32_t val; 1727 int i; 1728 1729 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1730 for (i = 0; i < BGE_TIMEOUT; i++) { 1731 val = CSR_READ_4(sc, BGE_VCPU_STATUS); 1732 if (val & BGE_VCPU_STATUS_INIT_DONE) 1733 break; 1734 DELAY(100); 1735 } 1736 if (i >= BGE_TIMEOUT) { 1737 aprint_error_dev(sc->bge_dev, "reset timed out\n"); 1738 return -1; 1739 } 1740 } else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) { 1741 /* 1742 * Poll the value location we just wrote until 1743 * we see the 1's complement of the magic number. 1744 * This indicates that the firmware initialization 1745 * is complete. 1746 * XXX 1000ms for Flash and 10000ms for SEEPROM. 1747 */ 1748 for (i = 0; i < BGE_TIMEOUT; i++) { 1749 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 1750 if (val == ~BGE_MAGIC_NUMBER) 1751 break; 1752 DELAY(10); 1753 } 1754 1755 if (i >= BGE_TIMEOUT) { 1756 aprint_error_dev(sc->bge_dev, 1757 "firmware handshake timed out, val = %x\n", val); 1758 return -1; 1759 } 1760 } 1761 1762 return 0; 1763 } 1764 1765 /* 1766 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1767 * self-test results. 1768 */ 1769 static int 1770 bge_chipinit(struct bge_softc *sc) 1771 { 1772 int i; 1773 uint32_t dma_rw_ctl; 1774 1775 /* Set endianness before we access any non-PCI registers. */ 1776 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 1777 BGE_INIT); 1778 1779 /* Set power state to D0. */ 1780 bge_setpowerstate(sc, 0); 1781 1782 /* Clear the MAC control register */ 1783 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 1784 1785 /* 1786 * Clear the MAC statistics block in the NIC's 1787 * internal memory. 1788 */ 1789 for (i = BGE_STATS_BLOCK; 1790 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 1791 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 1792 1793 for (i = BGE_STATUS_BLOCK; 1794 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 1795 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0); 1796 1797 /* Set up the PCI DMA control register. */ 1798 dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD; 1799 if (sc->bge_flags & BGE_PCIE) { 1800 /* Read watermark not used, 128 bytes for write. */ 1801 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n", 1802 device_xname(sc->bge_dev))); 1803 dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1804 } else if (sc->bge_flags & BGE_PCIX) { 1805 DPRINTFN(4, ("(:%s: PCI-X DMA setting)\n", 1806 device_xname(sc->bge_dev))); 1807 /* PCI-X bus */ 1808 if (BGE_IS_5714_FAMILY(sc)) { 1809 /* 256 bytes for read and write. */ 1810 dma_rw_ctl |= (0x02 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1811 (0x02 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1812 1813 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780) 1814 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 1815 else 1816 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1817 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1818 /* 1536 bytes for read, 384 bytes for write. */ 1819 dma_rw_ctl |= 1820 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1821 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1822 } else { 1823 /* 384 bytes for read and write. */ 1824 dma_rw_ctl |= (0x03 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1825 (0x03 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | 1826 (0x0F); 1827 } 1828 1829 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 1830 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 1831 uint32_t tmp; 1832 1833 /* Set ONEDMA_ATONCE for hardware workaround. */ 1834 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; 1835 if (tmp == 6 || tmp == 7) 1836 dma_rw_ctl |= 1837 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 1838 1839 /* Set PCI-X DMA write workaround. */ 1840 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 1841 } 1842 } else { 1843 /* Conventional PCI bus: 256 bytes for read and write. */ 1844 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", 1845 device_xname(sc->bge_dev))); 1846 dma_rw_ctl = (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | 1847 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); 1848 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 && 1849 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750) 1850 dma_rw_ctl |= 0x0F; 1851 } 1852 1853 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 1854 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) 1855 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 1856 BGE_PCIDMARWCTL_ASRT_ALL_BE; 1857 1858 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 1859 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1860 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 1861 1862 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, 1863 dma_rw_ctl); 1864 1865 /* 1866 * Set up general mode register. 1867 */ 1868 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 1869 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 1870 BGE_MODECTL_TX_NO_PHDR_CSUM | BGE_MODECTL_RX_NO_PHDR_CSUM); 1871 1872 /* 1873 * BCM5701 B5 have a bug causing data corruption when using 1874 * 64-bit DMA reads, which can be terminated early and then 1875 * completed later as 32-bit accesses, in combination with 1876 * certain bridges. 1877 */ 1878 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 1879 sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 1880 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32); 1881 1882 /* 1883 * Tell the firmware the driver is running 1884 */ 1885 if (sc->bge_asf_mode & ASF_STACKUP) 1886 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 1887 1888 /* 1889 * Disable memory write invalidate. Apparently it is not supported 1890 * properly by these devices. 1891 */ 1892 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, 1893 PCI_COMMAND_INVALIDATE_ENABLE); 1894 1895 #ifdef __brokenalpha__ 1896 /* 1897 * Must insure that we do not cross an 8K (bytes) boundary 1898 * for DMA reads. Our highest limit is 1K bytes. This is a 1899 * restriction on some ALPHA platforms with early revision 1900 * 21174 PCI chipsets, such as the AlphaPC 164lx 1901 */ 1902 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4); 1903 #endif 1904 1905 /* Set the timer prescaler (always 66MHz) */ 1906 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); 1907 1908 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1909 DELAY(40); /* XXX */ 1910 1911 /* Put PHY into ready state */ 1912 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 1913 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 1914 DELAY(40); 1915 } 1916 1917 return 0; 1918 } 1919 1920 static int 1921 bge_blockinit(struct bge_softc *sc) 1922 { 1923 volatile struct bge_rcb *rcb; 1924 bus_size_t rcb_addr; 1925 int i; 1926 struct ifnet *ifp = &sc->ethercom.ec_if; 1927 bge_hostaddr taddr; 1928 uint32_t val; 1929 1930 /* 1931 * Initialize the memory window pointer register so that 1932 * we can access the first 32K of internal NIC RAM. This will 1933 * allow us to set up the TX send ring RCBs and the RX return 1934 * ring RCBs, plus other things which live in NIC memory. 1935 */ 1936 1937 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 1938 1939 /* Step 33: Configure mbuf memory pool */ 1940 if (BGE_IS_5700_FAMILY(sc)) { 1941 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1942 BGE_BUFFPOOL_1); 1943 1944 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1945 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1946 else 1947 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1948 1949 /* Configure DMA resource pool */ 1950 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1951 BGE_DMA_DESCRIPTORS); 1952 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1953 } 1954 1955 /* Step 35: Configure mbuf pool watermarks */ 1956 #ifdef ORIG_WPAUL_VALUES 1957 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1958 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1959 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1960 #else 1961 1962 /* new broadcom docs strongly recommend these: */ 1963 if (!BGE_IS_5705_PLUS(sc)) { 1964 if (ifp->if_mtu > ETHER_MAX_LEN) { 1965 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1966 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1967 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1968 } else { 1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 1970 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 1971 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 1972 } 1973 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1974 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1975 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1976 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1977 } else { 1978 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1979 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1980 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1981 } 1982 #endif 1983 1984 /* Step 36: Configure DMA resource watermarks */ 1985 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 1986 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 1987 1988 /* Step 38: Enable buffer manager */ 1989 CSR_WRITE_4(sc, BGE_BMAN_MODE, 1990 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 1991 1992 /* Step 39: Poll for buffer manager start indication */ 1993 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 1994 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 1995 break; 1996 DELAY(10); 1997 } 1998 1999 if (i == BGE_TIMEOUT * 2) { 2000 aprint_error_dev(sc->bge_dev, 2001 "buffer manager failed to start\n"); 2002 return ENXIO; 2003 } 2004 2005 /* Step 40: Enable flow-through queues */ 2006 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2007 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2008 2009 /* Wait until queue initialization is complete */ 2010 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2011 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2012 break; 2013 DELAY(10); 2014 } 2015 2016 if (i == BGE_TIMEOUT * 2) { 2017 aprint_error_dev(sc->bge_dev, 2018 "flow-through queue init failed\n"); 2019 return ENXIO; 2020 } 2021 2022 /* Step 41: Initialize the standard RX ring control block */ 2023 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2024 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2025 if (BGE_IS_5705_PLUS(sc)) 2026 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2027 else 2028 rcb->bge_maxlen_flags = 2029 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2030 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2031 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2032 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2033 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2034 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2035 2036 /* 2037 * Step 42: Initialize the jumbo RX ring control block 2038 * We set the 'ring disabled' bit in the flags 2039 * field until we're actually ready to start 2040 * using this ring (i.e. once we set the MTU 2041 * high enough to require it). 2042 */ 2043 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2044 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2045 BGE_HOSTADDR(rcb->bge_hostaddr, 2046 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2047 rcb->bge_maxlen_flags = 2048 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 2049 BGE_RCB_FLAG_RING_DISABLED); 2050 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2051 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2052 rcb->bge_hostaddr.bge_addr_hi); 2053 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2054 rcb->bge_hostaddr.bge_addr_lo); 2055 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2056 rcb->bge_maxlen_flags); 2057 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2058 2059 /* Set up dummy disabled mini ring RCB */ 2060 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2061 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2062 BGE_RCB_FLAG_RING_DISABLED); 2063 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2064 rcb->bge_maxlen_flags); 2065 2066 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2067 offsetof(struct bge_ring_data, bge_info), 2068 sizeof (struct bge_gib), 2069 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2070 } 2071 2072 /* 2073 * Set the BD ring replenish thresholds. The recommended 2074 * values are 1/8th the number of descriptors allocated to 2075 * each ring. 2076 */ 2077 i = BGE_STD_RX_RING_CNT / 8; 2078 2079 /* 2080 * Use a value of 8 for the following chips to workaround HW errata. 2081 * Some of these chips have been added based on empirical 2082 * evidence (they don't work unless this is done). 2083 */ 2084 if (BGE_IS_5705_PLUS(sc)) 2085 i = 8; 2086 2087 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i); 2088 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8); 2089 2090 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2091 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765) { 2092 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2093 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2094 } 2095 2096 /* 2097 * Disable all unused send rings by setting the 'ring disabled' 2098 * bit in the flags field of all the TX send ring control blocks. 2099 * These are located in NIC memory. 2100 */ 2101 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2102 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 2103 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2104 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2105 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2106 rcb_addr += sizeof(struct bge_rcb); 2107 } 2108 2109 /* Configure TX RCB 0 (we use only the first ring) */ 2110 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2111 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2112 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2113 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2114 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2115 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2116 if (BGE_IS_5700_FAMILY(sc)) 2117 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2118 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2119 2120 /* Disable all unused RX return rings */ 2121 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2122 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 2123 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2124 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2125 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2126 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2127 BGE_RCB_FLAG_RING_DISABLED)); 2128 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2129 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2130 (i * (sizeof(uint64_t))), 0); 2131 rcb_addr += sizeof(struct bge_rcb); 2132 } 2133 2134 /* Initialize RX ring indexes */ 2135 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2136 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2137 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2138 2139 /* 2140 * Set up RX return ring 0 2141 * Note that the NIC address for RX return rings is 0x00000000. 2142 * The return rings live entirely within the host, so the 2143 * nicaddr field in the RCB isn't used. 2144 */ 2145 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2146 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2147 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2148 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2149 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2150 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2151 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2152 2153 /* Set random backoff seed for TX */ 2154 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2155 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 2156 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 2157 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] + 2158 BGE_TX_BACKOFF_SEED_MASK); 2159 2160 /* Set inter-packet gap */ 2161 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 2162 2163 /* 2164 * Specify which ring to use for packets that don't match 2165 * any RX rules. 2166 */ 2167 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2168 2169 /* 2170 * Configure number of RX lists. One interrupt distribution 2171 * list, sixteen active lists, one bad frames class. 2172 */ 2173 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2174 2175 /* Inialize RX list placement stats mask. */ 2176 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2177 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2178 2179 /* Disable host coalescing until we get it set up */ 2180 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2181 2182 /* Poll to make sure it's shut down. */ 2183 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2184 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2185 break; 2186 DELAY(10); 2187 } 2188 2189 if (i == BGE_TIMEOUT * 2) { 2190 aprint_error_dev(sc->bge_dev, 2191 "host coalescing engine failed to idle\n"); 2192 return ENXIO; 2193 } 2194 2195 /* Set up host coalescing defaults */ 2196 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2197 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2198 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2199 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2200 if (BGE_IS_5700_FAMILY(sc)) { 2201 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2202 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2203 } 2204 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2205 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2206 2207 /* Set up address of statistics block */ 2208 if (BGE_IS_5700_FAMILY(sc)) { 2209 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2210 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2211 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2212 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2213 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2214 } 2215 2216 /* Set up address of status block */ 2217 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2218 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2219 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2220 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2221 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2222 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2223 2224 /* Turn on host coalescing state machine */ 2225 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2226 2227 /* Turn on RX BD completion state machine and enable attentions */ 2228 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2229 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2230 2231 /* Turn on RX list placement state machine */ 2232 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2233 2234 /* Turn on RX list selector state machine. */ 2235 if (BGE_IS_5700_FAMILY(sc)) 2236 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2237 2238 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2239 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2240 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2241 BGE_MACMODE_FRMHDR_DMA_ENB; 2242 2243 if (sc->bge_flags & BGE_PHY_FIBER_TBI) 2244 val |= BGE_PORTMODE_TBI; 2245 else if (sc->bge_flags & BGE_PHY_FIBER_MII) 2246 val |= BGE_PORTMODE_GMII; 2247 else 2248 val |= BGE_PORTMODE_MII; 2249 2250 /* Turn on DMA, clear stats */ 2251 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2252 2253 /* Set misc. local control, enable interrupts on attentions */ 2254 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM; 2255 2256 #ifdef notdef 2257 /* Assert GPIO pins for PHY reset */ 2258 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| 2259 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); 2260 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| 2261 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); 2262 #endif 2263 2264 #if defined(not_quite_yet) 2265 /* Linux driver enables enable gpio pin #1 on 5700s */ 2266 if (sc->bge_chipid == BGE_CHIPID_BCM5700) { 2267 sc->bge_local_ctrl_reg |= 2268 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1); 2269 } 2270 #endif 2271 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2272 2273 /* Turn on DMA completion state machine */ 2274 if (BGE_IS_5700_FAMILY(sc)) 2275 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 2276 2277 /* Turn on write DMA state machine */ 2278 { 2279 uint32_t bge_wdma_mode = 2280 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS; 2281 2282 /* Enable host coalescing bug fix; see Linux tg3.c */ 2283 if (BGE_IS_5755_PLUS(sc)) 2284 bge_wdma_mode |= BGE_WDMAMODE_STATUS_TAG_FIX; 2285 2286 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode); 2287 } 2288 2289 /* Turn on read DMA state machine */ 2290 { 2291 uint32_t dma_read_modebits; 2292 2293 dma_read_modebits = 2294 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 2295 2296 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2297 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2298 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2299 dma_read_modebits |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 2300 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 2301 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 2302 2303 if (sc->bge_flags & BGE_PCIE) 2304 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST; 2305 if (sc->bge_flags & BGE_TSO) 2306 dma_read_modebits |= BGE_RDMAMODE_TSO4_ENABLE; 2307 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits); 2308 delay(40); 2309 } 2310 2311 /* Turn on RX data completion state machine */ 2312 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 2313 2314 /* Turn on RX BD initiator state machine */ 2315 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 2316 2317 /* Turn on RX data and RX BD initiator state machine */ 2318 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 2319 2320 /* Turn on Mbuf cluster free state machine */ 2321 if (BGE_IS_5700_FAMILY(sc)) 2322 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 2323 2324 /* Turn on send BD completion state machine */ 2325 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 2326 2327 /* Turn on send data completion state machine */ 2328 val = BGE_SDCMODE_ENABLE; 2329 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761) 2330 val |= BGE_SDCMODE_CDELAY; 2331 CSR_WRITE_4(sc, BGE_SDC_MODE, val); 2332 2333 /* Turn on send data initiator state machine */ 2334 if (sc->bge_flags & BGE_TSO) { 2335 /* XXX: magic value from Linux driver */ 2336 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 2337 } else 2338 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 2339 2340 /* Turn on send BD initiator state machine */ 2341 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 2342 2343 /* Turn on send BD selector state machine */ 2344 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 2345 2346 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 2347 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 2348 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 2349 2350 /* ack/clear link change events */ 2351 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2352 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2353 BGE_MACSTAT_LINK_CHANGED); 2354 CSR_WRITE_4(sc, BGE_MI_STS, 0); 2355 2356 /* Enable PHY auto polling (for MII/GMII only) */ 2357 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 2358 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2359 } else { 2360 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 2361 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 2362 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 2363 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2364 BGE_EVTENB_MI_INTERRUPT); 2365 } 2366 2367 /* 2368 * Clear any pending link state attention. 2369 * Otherwise some link state change events may be lost until attention 2370 * is cleared by bge_intr() -> bge_link_upd() sequence. 2371 * It's not necessary on newer BCM chips - perhaps enabling link 2372 * state change attentions implies clearing pending attention. 2373 */ 2374 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2375 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2376 BGE_MACSTAT_LINK_CHANGED); 2377 2378 /* Enable link state change attentions. */ 2379 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 2380 2381 return 0; 2382 } 2383 2384 static const struct bge_revision * 2385 bge_lookup_rev(uint32_t chipid) 2386 { 2387 const struct bge_revision *br; 2388 2389 for (br = bge_revisions; br->br_name != NULL; br++) { 2390 if (br->br_chipid == chipid) 2391 return br; 2392 } 2393 2394 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2395 if (br->br_chipid == BGE_ASICREV(chipid)) 2396 return br; 2397 } 2398 2399 return NULL; 2400 } 2401 2402 static const struct bge_product * 2403 bge_lookup(const struct pci_attach_args *pa) 2404 { 2405 const struct bge_product *bp; 2406 2407 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2408 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2409 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2410 return bp; 2411 } 2412 2413 return NULL; 2414 } 2415 2416 static int 2417 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2418 { 2419 #ifdef NOTYET 2420 uint32_t pm_ctl = 0; 2421 2422 /* XXX FIXME: make sure indirect accesses enabled? */ 2423 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2424 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2425 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2426 2427 /* clear the PME_assert bit and power state bits, enable PME */ 2428 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2429 pm_ctl &= ~PCIM_PSTAT_DMASK; 2430 pm_ctl |= (1 << 8); 2431 2432 if (powerlevel == 0) { 2433 pm_ctl |= PCIM_PSTAT_D0; 2434 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2435 pm_ctl, 2); 2436 DELAY(10000); 2437 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2438 DELAY(10000); 2439 2440 #ifdef NOTYET 2441 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2442 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2443 #endif 2444 DELAY(40); DELAY(40); DELAY(40); 2445 DELAY(10000); /* above not quite adequate on 5700 */ 2446 return 0; 2447 } 2448 2449 2450 /* 2451 * Entering ACPI power states D1-D3 is achieved by wiggling 2452 * GMII gpio pins. Example code assumes all hardware vendors 2453 * followed Broadom's sample pcb layout. Until we verify that 2454 * for all supported OEM cards, states D1-D3 are unsupported. 2455 */ 2456 aprint_error_dev(sc->bge_dev, 2457 "power state %d unimplemented; check GPIO pins\n", 2458 powerlevel); 2459 #endif 2460 return EOPNOTSUPP; 2461 } 2462 2463 2464 /* 2465 * Probe for a Broadcom chip. Check the PCI vendor and device IDs 2466 * against our list and return its name if we find a match. Note 2467 * that since the Broadcom controller contains VPD support, we 2468 * can get the device name string from the controller itself instead 2469 * of the compiled-in string. This is a little slow, but it guarantees 2470 * we'll always announce the right product name. 2471 */ 2472 static int 2473 bge_probe(device_t parent, cfdata_t match, void *aux) 2474 { 2475 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2476 2477 if (bge_lookup(pa) != NULL) 2478 return 1; 2479 2480 return 0; 2481 } 2482 2483 static void 2484 bge_attach(device_t parent, device_t self, void *aux) 2485 { 2486 struct bge_softc *sc = device_private(self); 2487 struct pci_attach_args *pa = aux; 2488 prop_dictionary_t dict; 2489 const struct bge_product *bp; 2490 const struct bge_revision *br; 2491 pci_chipset_tag_t pc; 2492 pci_intr_handle_t ih; 2493 const char *intrstr = NULL; 2494 bus_dma_segment_t seg; 2495 int rseg; 2496 uint32_t hwcfg = 0; 2497 uint32_t command; 2498 struct ifnet *ifp; 2499 uint32_t misccfg; 2500 void * kva; 2501 u_char eaddr[ETHER_ADDR_LEN]; 2502 pcireg_t memtype, subid; 2503 bus_addr_t memaddr; 2504 bus_size_t memsize; 2505 uint32_t pm_ctl; 2506 bool no_seeprom; 2507 2508 bp = bge_lookup(pa); 2509 KASSERT(bp != NULL); 2510 2511 sc->sc_pc = pa->pa_pc; 2512 sc->sc_pcitag = pa->pa_tag; 2513 sc->bge_dev = self; 2514 2515 pc = sc->sc_pc; 2516 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 2517 2518 aprint_naive(": Ethernet controller\n"); 2519 aprint_normal(": %s\n", bp->bp_name); 2520 2521 /* 2522 * Map control/status registers. 2523 */ 2524 DPRINTFN(5, ("Map control/status regs\n")); 2525 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2526 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2527 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 2528 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2529 2530 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2531 aprint_error_dev(sc->bge_dev, 2532 "failed to enable memory mapping!\n"); 2533 return; 2534 } 2535 2536 DPRINTFN(5, ("pci_mem_find\n")); 2537 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 2538 switch (memtype) { 2539 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2540 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2541 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2542 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2543 &memaddr, &memsize) == 0) 2544 break; 2545 default: 2546 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2547 return; 2548 } 2549 2550 DPRINTFN(5, ("pci_intr_map\n")); 2551 if (pci_intr_map(pa, &ih)) { 2552 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2553 return; 2554 } 2555 2556 DPRINTFN(5, ("pci_intr_string\n")); 2557 intrstr = pci_intr_string(pc, ih); 2558 2559 DPRINTFN(5, ("pci_intr_establish\n")); 2560 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2561 2562 if (sc->bge_intrhand == NULL) { 2563 aprint_error_dev(sc->bge_dev, 2564 "couldn't establish interrupt%s%s\n", 2565 intrstr ? " at " : "", intrstr ? intrstr : ""); 2566 return; 2567 } 2568 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2569 2570 /* 2571 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2572 * can clobber the chip's PCI config-space power control registers, 2573 * leaving the card in D3 powersave state. 2574 * We do not have memory-mapped registers in this state, 2575 * so force device into D0 state before starting initialization. 2576 */ 2577 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD); 2578 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3); 2579 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */ 2580 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl); 2581 DELAY(1000); /* 27 usec is allegedly sufficent */ 2582 2583 /* 2584 * Save ASIC rev. 2585 */ 2586 sc->bge_chipid = 2587 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) 2588 >> BGE_PCIMISCCTL_ASICREV_SHIFT; 2589 2590 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) { 2591 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717 || 2592 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718 || 2593 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5724) 2594 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2595 BGE_PCI_GEN2_PRODID_ASICREV); 2596 else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57761 || 2597 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57765 || 2598 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57781 || 2599 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57785 || 2600 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 || 2601 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795) 2602 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2603 BGE_PCI_GEN15_PRODID_ASICREV); 2604 else 2605 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag, 2606 BGE_PCI_PRODID_ASICREV); 2607 } 2608 2609 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS, 2610 &sc->bge_pciecap, NULL) != 0) { 2611 /* PCIe */ 2612 sc->bge_flags |= BGE_PCIE; 2613 bge_set_max_readrq(sc); 2614 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) & 2615 BGE_PCISTATE_PCI_BUSMODE) == 0) { 2616 /* PCI-X */ 2617 sc->bge_flags |= BGE_PCIX; 2618 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX, 2619 &sc->bge_pcixcap, NULL) == 0) 2620 aprint_error_dev(sc->bge_dev, 2621 "unable to find PCIX capability\n"); 2622 } 2623 2624 /* chipid */ 2625 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2626 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 || 2627 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || 2628 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 2629 sc->bge_flags |= BGE_5700_FAMILY; 2630 2631 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || 2632 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || 2633 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714) 2634 sc->bge_flags |= BGE_5714_FAMILY; 2635 2636 /* Intentionally exclude BGE_ASICREV_BCM5906 */ 2637 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2638 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2639 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2640 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2641 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 || 2642 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || 2643 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 2644 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780) 2645 sc->bge_flags |= BGE_5755_PLUS; 2646 2647 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || 2648 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 2649 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 || 2650 BGE_IS_5755_PLUS(sc) || 2651 BGE_IS_5714_FAMILY(sc)) 2652 sc->bge_flags |= BGE_5750_PLUS; 2653 2654 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 || 2655 BGE_IS_5750_OR_BEYOND(sc)) 2656 sc->bge_flags |= BGE_5705_PLUS; 2657 2658 /* 2659 * When using the BCM5701 in PCI-X mode, data corruption has 2660 * been observed in the first few bytes of some received packets. 2661 * Aligning the packet buffer in memory eliminates the corruption. 2662 * Unfortunately, this misaligns the packet payloads. On platforms 2663 * which do not support unaligned accesses, we will realign the 2664 * payloads by copying the received packets. 2665 */ 2666 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 && 2667 sc->bge_flags & BGE_PCIX) 2668 sc->bge_flags |= BGE_RX_ALIGNBUG; 2669 2670 if (BGE_IS_5700_FAMILY(sc)) 2671 sc->bge_flags |= BGE_JUMBO_CAPABLE; 2672 2673 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2674 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) && 2675 PCI_VENDOR(subid) == PCI_VENDOR_DELL) 2676 sc->bge_flags |= BGE_NO_3LED; 2677 2678 misccfg = CSR_READ_4(sc, BGE_MISC_CFG); 2679 misccfg &= BGE_MISCCFG_BOARD_ID_MASK; 2680 2681 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2682 (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 2683 misccfg == BGE_MISCCFG_BOARD_ID_5788M)) 2684 sc->bge_flags |= BGE_IS_5788; 2685 2686 /* 2687 * Some controllers seem to require a special firmware to use 2688 * TSO. But the firmware is not available to FreeBSD and Linux 2689 * claims that the TSO performed by the firmware is slower than 2690 * hardware based TSO. Moreover the firmware based TSO has one 2691 * known bug which can't handle TSO if ethernet header + IP/TCP 2692 * header is greater than 80 bytes. The workaround for the TSO 2693 * bug exist but it seems it's too expensive than not using 2694 * TSO at all. Some hardwares also have the TSO bug so limit 2695 * the TSO to the controllers that are not affected TSO issues 2696 * (e.g. 5755 or higher). 2697 */ 2698 if (BGE_IS_5755_PLUS(sc)) { 2699 /* 2700 * BCM5754 and BCM5787 shares the same ASIC id so 2701 * explicit device id check is required. 2702 */ 2703 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 2704 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 2705 sc->bge_flags |= BGE_TSO; 2706 } 2707 2708 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 2709 (misccfg == 0x4000 || misccfg == 0x8000)) || 2710 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2711 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2712 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 2713 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2714 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2715 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2716 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 2717 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 2718 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2719 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 2720 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2721 sc->bge_flags |= BGE_10_100_ONLY; 2722 2723 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2724 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2725 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2726 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2727 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2728 sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED; 2729 2730 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2731 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2732 sc->bge_flags |= BGE_PHY_CRC_BUG; 2733 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 2734 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 2735 sc->bge_flags |= BGE_PHY_ADC_BUG; 2736 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2737 sc->bge_flags |= BGE_PHY_5704_A0_BUG; 2738 2739 if (BGE_IS_5705_PLUS(sc) && 2740 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 2741 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2742 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 2743 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 && 2744 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) { 2745 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2746 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2747 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2748 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 2749 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 2750 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 2751 sc->bge_flags |= BGE_PHY_JITTER_BUG; 2752 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 2753 sc->bge_flags |= BGE_PHY_ADJUST_TRIM; 2754 } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 2755 sc->bge_flags |= BGE_PHY_BER_BUG; 2756 } 2757 2758 /* 2759 * SEEPROM check. 2760 * First check if firmware knows we do not have SEEPROM. 2761 */ 2762 if (prop_dictionary_get_bool(device_properties(self), 2763 "without-seeprom", &no_seeprom) && no_seeprom) 2764 sc->bge_flags |= BGE_NO_EEPROM; 2765 2766 /* Now check the 'ROM failed' bit on the RX CPU */ 2767 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 2768 sc->bge_flags |= BGE_NO_EEPROM; 2769 2770 /* Try to reset the chip. */ 2771 DPRINTFN(5, ("bge_reset\n")); 2772 bge_reset(sc); 2773 2774 sc->bge_asf_mode = 0; 2775 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2776 == BGE_MAGIC_NUMBER)) { 2777 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 2778 & BGE_HWCFG_ASF) { 2779 sc->bge_asf_mode |= ASF_ENABLE; 2780 sc->bge_asf_mode |= ASF_STACKUP; 2781 if (BGE_IS_5750_OR_BEYOND(sc)) { 2782 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2783 } 2784 } 2785 } 2786 2787 /* Try to reset the chip again the nice way. */ 2788 bge_stop_fw(sc); 2789 bge_sig_pre_reset(sc, BGE_RESET_STOP); 2790 if (bge_reset(sc)) 2791 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 2792 2793 bge_sig_legacy(sc, BGE_RESET_STOP); 2794 bge_sig_post_reset(sc, BGE_RESET_STOP); 2795 2796 if (bge_chipinit(sc)) { 2797 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 2798 bge_release_resources(sc); 2799 return; 2800 } 2801 2802 /* 2803 * Get station address from the EEPROM 2804 */ 2805 if (bge_get_eaddr(sc, eaddr)) { 2806 aprint_error_dev(sc->bge_dev, 2807 "failed to read station address\n"); 2808 bge_release_resources(sc); 2809 return; 2810 } 2811 2812 br = bge_lookup_rev(sc->bge_chipid); 2813 2814 if (br == NULL) { 2815 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 2816 sc->bge_chipid); 2817 } else { 2818 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 2819 br->br_name, sc->bge_chipid); 2820 } 2821 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2822 2823 /* Allocate the general information block and ring buffers. */ 2824 if (pci_dma64_available(pa)) 2825 sc->bge_dmatag = pa->pa_dmat64; 2826 else 2827 sc->bge_dmatag = pa->pa_dmat; 2828 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2829 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2830 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2831 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2832 return; 2833 } 2834 DPRINTFN(5, ("bus_dmamem_map\n")); 2835 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2836 sizeof(struct bge_ring_data), &kva, 2837 BUS_DMA_NOWAIT)) { 2838 aprint_error_dev(sc->bge_dev, 2839 "can't map DMA buffers (%zu bytes)\n", 2840 sizeof(struct bge_ring_data)); 2841 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2842 return; 2843 } 2844 DPRINTFN(5, ("bus_dmamem_create\n")); 2845 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2846 sizeof(struct bge_ring_data), 0, 2847 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2848 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2849 bus_dmamem_unmap(sc->bge_dmatag, kva, 2850 sizeof(struct bge_ring_data)); 2851 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2852 return; 2853 } 2854 DPRINTFN(5, ("bus_dmamem_load\n")); 2855 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2856 sizeof(struct bge_ring_data), NULL, 2857 BUS_DMA_NOWAIT)) { 2858 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2859 bus_dmamem_unmap(sc->bge_dmatag, kva, 2860 sizeof(struct bge_ring_data)); 2861 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2862 return; 2863 } 2864 2865 DPRINTFN(5, ("bzero\n")); 2866 sc->bge_rdata = (struct bge_ring_data *)kva; 2867 2868 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2869 2870 /* Try to allocate memory for jumbo buffers. */ 2871 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2872 if (bge_alloc_jumbo_mem(sc)) { 2873 aprint_error_dev(sc->bge_dev, 2874 "jumbo buffer allocation failed\n"); 2875 } else 2876 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2877 } 2878 2879 /* Set default tuneable values. */ 2880 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2881 sc->bge_rx_coal_ticks = 150; 2882 sc->bge_rx_max_coal_bds = 64; 2883 #ifdef ORIG_WPAUL_VALUES 2884 sc->bge_tx_coal_ticks = 150; 2885 sc->bge_tx_max_coal_bds = 128; 2886 #else 2887 sc->bge_tx_coal_ticks = 300; 2888 sc->bge_tx_max_coal_bds = 400; 2889 #endif 2890 if (BGE_IS_5705_PLUS(sc)) { 2891 sc->bge_tx_coal_ticks = (12 * 5); 2892 sc->bge_tx_max_coal_bds = (12 * 5); 2893 aprint_verbose_dev(sc->bge_dev, 2894 "setting short Tx thresholds\n"); 2895 } 2896 2897 if (BGE_IS_5705_PLUS(sc)) 2898 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2899 else 2900 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2901 2902 /* Set up ifnet structure */ 2903 ifp = &sc->ethercom.ec_if; 2904 ifp->if_softc = sc; 2905 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2906 ifp->if_ioctl = bge_ioctl; 2907 ifp->if_stop = bge_stop; 2908 ifp->if_start = bge_start; 2909 ifp->if_init = bge_init; 2910 ifp->if_watchdog = bge_watchdog; 2911 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2912 IFQ_SET_READY(&ifp->if_snd); 2913 DPRINTFN(5, ("strcpy if_xname\n")); 2914 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2915 2916 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 2917 sc->ethercom.ec_if.if_capabilities |= 2918 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 2919 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 2920 sc->ethercom.ec_if.if_capabilities |= 2921 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2922 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2923 #endif 2924 sc->ethercom.ec_capabilities |= 2925 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2926 2927 if (sc->bge_flags & BGE_TSO) 2928 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2929 2930 /* 2931 * Do MII setup. 2932 */ 2933 DPRINTFN(5, ("mii setup\n")); 2934 sc->bge_mii.mii_ifp = ifp; 2935 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2936 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2937 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2938 2939 /* 2940 * Figure out what sort of media we have by checking the 2941 * hardware config word in the first 32k of NIC internal memory, 2942 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2943 * cards, this value appears to be unset. If that's the 2944 * case, we have to rely on identifying the NIC by its PCI 2945 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2946 */ 2947 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2948 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2949 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 2950 bge_read_eeprom(sc, (void *)&hwcfg, 2951 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2952 hwcfg = be32toh(hwcfg); 2953 } 2954 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2955 if (PCI_PRODUCT(pa->pa_id) == SK_SUBSYSID_9D41 || 2956 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 2957 if (BGE_IS_5714_FAMILY(sc)) 2958 sc->bge_flags |= BGE_PHY_FIBER_MII; 2959 else 2960 sc->bge_flags |= BGE_PHY_FIBER_TBI; 2961 } 2962 2963 /* set phyflags before mii_attach() */ 2964 dict = device_properties(self); 2965 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags); 2966 2967 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 2968 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 2969 bge_ifmedia_sts); 2970 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER |IFM_1000_SX, 0, NULL); 2971 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX, 2972 0, NULL); 2973 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 2974 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 2975 /* Pretend the user requested this setting */ 2976 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2977 } else { 2978 /* 2979 * Do transceiver setup and tell the firmware the 2980 * driver is down so we can try to get access the 2981 * probe if ASF is running. Retry a couple of times 2982 * if we get a conflict with the ASF firmware accessing 2983 * the PHY. 2984 */ 2985 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2986 bge_asf_driver_up(sc); 2987 2988 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd, 2989 bge_ifmedia_sts); 2990 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff, 2991 MII_PHY_ANY, MII_OFFSET_ANY, 2992 MIIF_FORCEANEG|MIIF_DOPAUSE); 2993 2994 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) { 2995 aprint_error_dev(sc->bge_dev, "no PHY found!\n"); 2996 ifmedia_add(&sc->bge_mii.mii_media, 2997 IFM_ETHER|IFM_MANUAL, 0, NULL); 2998 ifmedia_set(&sc->bge_mii.mii_media, 2999 IFM_ETHER|IFM_MANUAL); 3000 } else 3001 ifmedia_set(&sc->bge_mii.mii_media, 3002 IFM_ETHER|IFM_AUTO); 3003 3004 /* 3005 * Now tell the firmware we are going up after probing the PHY 3006 */ 3007 if (sc->bge_asf_mode & ASF_STACKUP) 3008 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3009 } 3010 3011 /* 3012 * Call MI attach routine. 3013 */ 3014 DPRINTFN(5, ("if_attach\n")); 3015 if_attach(ifp); 3016 DPRINTFN(5, ("ether_ifattach\n")); 3017 ether_ifattach(ifp, eaddr); 3018 #if NRND > 0 3019 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev), 3020 RND_TYPE_NET, 0); 3021 #endif 3022 #ifdef BGE_EVENT_COUNTERS 3023 /* 3024 * Attach event counters. 3025 */ 3026 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3027 NULL, device_xname(sc->bge_dev), "intr"); 3028 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3029 NULL, device_xname(sc->bge_dev), "tx_xoff"); 3030 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 3031 NULL, device_xname(sc->bge_dev), "tx_xon"); 3032 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 3033 NULL, device_xname(sc->bge_dev), "rx_xoff"); 3034 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 3035 NULL, device_xname(sc->bge_dev), "rx_xon"); 3036 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 3037 NULL, device_xname(sc->bge_dev), "rx_macctl"); 3038 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 3039 NULL, device_xname(sc->bge_dev), "xoffentered"); 3040 #endif /* BGE_EVENT_COUNTERS */ 3041 DPRINTFN(5, ("callout_init\n")); 3042 callout_init(&sc->bge_timeout, 0); 3043 3044 if (pmf_device_register(self, NULL, NULL)) 3045 pmf_class_network_register(self, ifp); 3046 else 3047 aprint_error_dev(self, "couldn't establish power handler\n"); 3048 3049 #ifdef BGE_DEBUG 3050 bge_debug_info(sc); 3051 #endif 3052 } 3053 3054 static void 3055 bge_release_resources(struct bge_softc *sc) 3056 { 3057 if (sc->bge_vpd_prodname != NULL) 3058 free(sc->bge_vpd_prodname, M_DEVBUF); 3059 3060 if (sc->bge_vpd_readonly != NULL) 3061 free(sc->bge_vpd_readonly, M_DEVBUF); 3062 } 3063 3064 static int 3065 bge_reset(struct bge_softc *sc) 3066 { 3067 uint32_t cachesize, command, pcistate, marbmode; 3068 #if 0 3069 uint32_t new_pcistate; 3070 #endif 3071 pcireg_t devctl, reg; 3072 int i, val; 3073 void (*write_op)(struct bge_softc *, int, int); 3074 3075 if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc) 3076 && (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 3077 if (sc->bge_flags & BGE_PCIE) 3078 write_op = bge_writemem_direct; 3079 else 3080 write_op = bge_writemem_ind; 3081 } else 3082 write_op = bge_writereg_ind; 3083 3084 /* Save some important PCI state. */ 3085 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 3086 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3087 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 3088 3089 /* Step 5a: Enable memory arbiter. */ 3090 marbmode = 0; 3091 if (BGE_IS_5714_FAMILY(sc)) 3092 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 3093 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 3094 3095 /* Step 5b-5d: */ 3096 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3097 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3098 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 3099 3100 /* XXX ???: Disable fastboot on controllers that support it. */ 3101 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 3102 BGE_IS_5755_PLUS(sc)) 3103 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 3104 3105 /* 3106 * Step 6: Write the magic number to SRAM at offset 0xB50. 3107 * When firmware finishes its initialization it will 3108 * write ~BGE_MAGIC_NUMBER to the same location. 3109 */ 3110 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 3111 3112 /* Step 7: */ 3113 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 3114 /* 3115 * XXX: from FreeBSD/Linux; no documentation 3116 */ 3117 if (sc->bge_flags & BGE_PCIE) { 3118 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 3119 /* PCI Express 1.0 system */ 3120 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 3121 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3122 /* 3123 * Prevent PCI Express link training 3124 * during global reset. 3125 */ 3126 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 3127 val |= (1<<29); 3128 } 3129 } 3130 3131 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3132 i = CSR_READ_4(sc, BGE_VCPU_STATUS); 3133 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 3134 i | BGE_VCPU_STATUS_DRV_RESET); 3135 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 3136 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 3137 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 3138 } 3139 3140 /* 3141 * Set GPHY Power Down Override to leave GPHY 3142 * powered up in D0 uninitialized. 3143 */ 3144 if (BGE_IS_5705_PLUS(sc)) 3145 val |= BGE_MISCCFG_KEEP_GPHY_POWER; 3146 3147 /* XXX 5721, 5751 and 5752 */ 3148 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) 3149 val |= BGE_MISCCFG_GRC_RESET_DISABLE; 3150 3151 /* Issue global reset */ 3152 write_op(sc, BGE_MISC_CFG, val); 3153 3154 /* Step 8: wait for complete */ 3155 if (sc->bge_flags & BGE_PCIE) 3156 delay(100*1000); /* too big */ 3157 else 3158 delay(100); 3159 3160 /* From Linux: dummy read to flush PCI posted writes */ 3161 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3162 3163 /* Step 9-10: Reset some of the PCI state that got zapped by reset */ 3164 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3165 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3166 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW 3167 | BGE_PCIMISCCTL_CLOCKCTL_RW); 3168 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 3169 write_op(sc, BGE_MISC_CFG, (65 << 1)); 3170 3171 /* Step 11: disable PCI-X Relaxed Ordering. */ 3172 if (sc->bge_flags & BGE_PCIX) { 3173 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3174 + PCI_PCIX_CMD); 3175 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3176 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER); 3177 } 3178 3179 if (sc->bge_flags & BGE_PCIE) { 3180 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3181 DELAY(500000); 3182 /* XXX: Magic Numbers */ 3183 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3184 BGE_PCI_UNKNOWN0); 3185 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3186 BGE_PCI_UNKNOWN0, 3187 reg | (1 << 15)); 3188 } 3189 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3190 sc->bge_pciecap + PCI_PCIE_DCSR); 3191 /* Clear enable no snoop and disable relaxed ordering. */ 3192 devctl &= ~(0x0010 | PCI_PCIE_DCSR_ENA_NO_SNOOP); 3193 /* Set PCIE max payload size to 128. */ 3194 devctl &= ~(0x00e0); 3195 /* Clear device status register. Write 1b to clear */ 3196 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED 3197 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED; 3198 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3199 sc->bge_pciecap + PCI_PCIE_DCSR, devctl); 3200 } 3201 3202 /* Step 12: Enable memory arbiter. */ 3203 marbmode = 0; 3204 if (BGE_IS_5714_FAMILY(sc)) 3205 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 3206 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 3207 3208 /* Step 17: Poll until the firmware iitializeation is complete */ 3209 bge_poll_fw(sc); 3210 3211 /* XXX 5721, 5751 and 5752 */ 3212 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) { 3213 /* Step 19: */ 3214 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25); 3215 /* Step 20: */ 3216 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT); 3217 } 3218 3219 /* 3220 * Step 18: wirte mac mode 3221 * XXX Write 0x0c for 5703S and 5704S 3222 */ 3223 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 3224 3225 3226 /* Step 21: 5822 B0 errata */ 3227 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) 3228 BGE_SETBIT(sc, 0x66, 1 << 13 | 1 << 12 | 1 << 10); 3229 3230 /* Step 23: restore cache line size */ 3231 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize); 3232 3233 #if 0 3234 /* 3235 * XXX Wait for the value of the PCISTATE register to 3236 * return to its original pre-reset state. This is a 3237 * fairly good indicator of reset completion. If we don't 3238 * wait for the reset to fully complete, trying to read 3239 * from the device's non-PCI registers may yield garbage 3240 * results. 3241 */ 3242 for (i = 0; i < BGE_TIMEOUT; i++) { 3243 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3244 BGE_PCI_PCISTATE); 3245 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) == 3246 (pcistate & ~BGE_PCISTATE_RESERVED)) 3247 break; 3248 DELAY(10); 3249 } 3250 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) != 3251 (pcistate & ~BGE_PCISTATE_RESERVED)) { 3252 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n"); 3253 } 3254 #endif 3255 3256 /* Step 28: Fix up byte swapping */ 3257 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS); 3258 3259 /* Tell the ASF firmware we are up */ 3260 if (sc->bge_asf_mode & ASF_STACKUP) 3261 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3262 3263 /* 3264 * The 5704 in TBI mode apparently needs some special 3265 * adjustment to insure the SERDES drive level is set 3266 * to 1.2V. 3267 */ 3268 if (sc->bge_flags & BGE_PHY_FIBER_TBI && 3269 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 3270 uint32_t serdescfg; 3271 3272 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 3273 serdescfg = (serdescfg & ~0xFFF) | 0x880; 3274 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 3275 } 3276 3277 if (sc->bge_flags & BGE_PCIE && 3278 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 3279 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 3280 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 3281 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765) { 3282 uint32_t v; 3283 3284 /* Enable PCI Express bug fix */ 3285 v = CSR_READ_4(sc, 0x7c00); 3286 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 3287 } 3288 DELAY(10000); 3289 3290 return 0; 3291 } 3292 3293 /* 3294 * Frame reception handling. This is called if there's a frame 3295 * on the receive return list. 3296 * 3297 * Note: we have to be able to handle two possibilities here: 3298 * 1) the frame is from the jumbo recieve ring 3299 * 2) the frame is from the standard receive ring 3300 */ 3301 3302 static void 3303 bge_rxeof(struct bge_softc *sc) 3304 { 3305 struct ifnet *ifp; 3306 uint16_t rx_prod, rx_cons; 3307 int stdcnt = 0, jumbocnt = 0; 3308 bus_dmamap_t dmamap; 3309 bus_addr_t offset, toff; 3310 bus_size_t tlen; 3311 int tosync; 3312 3313 rx_cons = sc->bge_rx_saved_considx; 3314 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 3315 3316 /* Nothing to do */ 3317 if (rx_cons == rx_prod) 3318 return; 3319 3320 ifp = &sc->ethercom.ec_if; 3321 3322 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3323 offsetof(struct bge_ring_data, bge_status_block), 3324 sizeof (struct bge_status_block), 3325 BUS_DMASYNC_POSTREAD); 3326 3327 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 3328 tosync = rx_prod - rx_cons; 3329 3330 #if NRND > 0 3331 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3332 rnd_add_uint32(&sc->rnd_source, tosync); 3333 #endif 3334 3335 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 3336 3337 if (tosync < 0) { 3338 tlen = (sc->bge_return_ring_cnt - rx_cons) * 3339 sizeof (struct bge_rx_bd); 3340 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3341 toff, tlen, BUS_DMASYNC_POSTREAD); 3342 tosync = -tosync; 3343 } 3344 3345 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3346 offset, tosync * sizeof (struct bge_rx_bd), 3347 BUS_DMASYNC_POSTREAD); 3348 3349 while (rx_cons != rx_prod) { 3350 struct bge_rx_bd *cur_rx; 3351 uint32_t rxidx; 3352 struct mbuf *m = NULL; 3353 3354 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 3355 3356 rxidx = cur_rx->bge_idx; 3357 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 3358 3359 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3360 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3361 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3362 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3363 jumbocnt++; 3364 bus_dmamap_sync(sc->bge_dmatag, 3365 sc->bge_cdata.bge_rx_jumbo_map, 3366 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3367 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3368 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3369 ifp->if_ierrors++; 3370 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3371 continue; 3372 } 3373 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3374 NULL)== ENOBUFS) { 3375 ifp->if_ierrors++; 3376 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3377 continue; 3378 } 3379 } else { 3380 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3381 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3382 3383 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3384 stdcnt++; 3385 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3386 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3387 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3388 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3389 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3390 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3391 ifp->if_ierrors++; 3392 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3393 continue; 3394 } 3395 if (bge_newbuf_std(sc, sc->bge_std, 3396 NULL, dmamap) == ENOBUFS) { 3397 ifp->if_ierrors++; 3398 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3399 continue; 3400 } 3401 } 3402 3403 ifp->if_ipackets++; 3404 #ifndef __NO_STRICT_ALIGNMENT 3405 /* 3406 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3407 * the Rx buffer has the layer-2 header unaligned. 3408 * If our CPU requires alignment, re-align by copying. 3409 */ 3410 if (sc->bge_flags & BGE_RX_ALIGNBUG) { 3411 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3412 cur_rx->bge_len); 3413 m->m_data += ETHER_ALIGN; 3414 } 3415 #endif 3416 3417 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3418 m->m_pkthdr.rcvif = ifp; 3419 3420 /* 3421 * Handle BPF listeners. Let the BPF user see the packet. 3422 */ 3423 if (ifp->if_bpf) 3424 bpf_ops->bpf_mtap(ifp->if_bpf, m); 3425 3426 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3427 3428 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3429 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3430 /* 3431 * Rx transport checksum-offload may also 3432 * have bugs with packets which, when transmitted, 3433 * were `runts' requiring padding. 3434 */ 3435 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3436 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3437 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3438 m->m_pkthdr.csum_data = 3439 cur_rx->bge_tcp_udp_csum; 3440 m->m_pkthdr.csum_flags |= 3441 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3442 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR); 3443 } 3444 3445 /* 3446 * If we received a packet with a vlan tag, pass it 3447 * to vlan_input() instead of ether_input(). 3448 */ 3449 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 3450 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3451 } 3452 3453 (*ifp->if_input)(ifp, m); 3454 } 3455 3456 sc->bge_rx_saved_considx = rx_cons; 3457 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3458 if (stdcnt) 3459 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3460 if (jumbocnt) 3461 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3462 } 3463 3464 static void 3465 bge_txeof(struct bge_softc *sc) 3466 { 3467 struct bge_tx_bd *cur_tx = NULL; 3468 struct ifnet *ifp; 3469 struct txdmamap_pool_entry *dma; 3470 bus_addr_t offset, toff; 3471 bus_size_t tlen; 3472 int tosync; 3473 struct mbuf *m; 3474 3475 ifp = &sc->ethercom.ec_if; 3476 3477 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3478 offsetof(struct bge_ring_data, bge_status_block), 3479 sizeof (struct bge_status_block), 3480 BUS_DMASYNC_POSTREAD); 3481 3482 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3483 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3484 sc->bge_tx_saved_considx; 3485 3486 #if NRND > 0 3487 if (tosync != 0 && RND_ENABLED(&sc->rnd_source)) 3488 rnd_add_uint32(&sc->rnd_source, tosync); 3489 #endif 3490 3491 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3492 3493 if (tosync < 0) { 3494 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3495 sizeof (struct bge_tx_bd); 3496 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3497 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3498 tosync = -tosync; 3499 } 3500 3501 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3502 offset, tosync * sizeof (struct bge_tx_bd), 3503 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3504 3505 /* 3506 * Go through our tx ring and free mbufs for those 3507 * frames that have been sent. 3508 */ 3509 while (sc->bge_tx_saved_considx != 3510 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3511 uint32_t idx = 0; 3512 3513 idx = sc->bge_tx_saved_considx; 3514 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3515 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3516 ifp->if_opackets++; 3517 m = sc->bge_cdata.bge_tx_chain[idx]; 3518 if (m != NULL) { 3519 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3520 dma = sc->txdma[idx]; 3521 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3522 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3523 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3524 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3525 sc->txdma[idx] = NULL; 3526 3527 m_freem(m); 3528 } 3529 sc->bge_txcnt--; 3530 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3531 ifp->if_timer = 0; 3532 } 3533 3534 if (cur_tx != NULL) 3535 ifp->if_flags &= ~IFF_OACTIVE; 3536 } 3537 3538 static int 3539 bge_intr(void *xsc) 3540 { 3541 struct bge_softc *sc; 3542 struct ifnet *ifp; 3543 uint32_t statusword; 3544 3545 sc = xsc; 3546 ifp = &sc->ethercom.ec_if; 3547 3548 /* It is possible for the interrupt to arrive before 3549 * the status block is updated prior to the interrupt. 3550 * Reading the PCI State register will confirm whether the 3551 * interrupt is ours and will flush the status block. 3552 */ 3553 3554 /* read status word from status block */ 3555 statusword = sc->bge_rdata->bge_status_block.bge_status; 3556 3557 if ((statusword & BGE_STATFLAG_UPDATED) || 3558 (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) { 3559 /* Ack interrupt and stop others from occuring. */ 3560 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3561 3562 BGE_EVCNT_INCR(sc->bge_ev_intr); 3563 3564 /* clear status word */ 3565 sc->bge_rdata->bge_status_block.bge_status = 0; 3566 3567 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3568 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 3569 BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 3570 bge_link_upd(sc); 3571 3572 if (ifp->if_flags & IFF_RUNNING) { 3573 /* Check RX return ring producer/consumer */ 3574 bge_rxeof(sc); 3575 3576 /* Check TX ring producer/consumer */ 3577 bge_txeof(sc); 3578 } 3579 3580 if (sc->bge_pending_rxintr_change) { 3581 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3582 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3583 uint32_t junk; 3584 3585 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3586 DELAY(10); 3587 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3588 3589 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3590 DELAY(10); 3591 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3592 3593 sc->bge_pending_rxintr_change = 0; 3594 } 3595 bge_handle_events(sc); 3596 3597 /* Re-enable interrupts. */ 3598 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3599 3600 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3601 bge_start(ifp); 3602 3603 return 1; 3604 } else 3605 return 0; 3606 } 3607 3608 static void 3609 bge_asf_driver_up(struct bge_softc *sc) 3610 { 3611 if (sc->bge_asf_mode & ASF_STACKUP) { 3612 /* Send ASF heartbeat aprox. every 2s */ 3613 if (sc->bge_asf_count) 3614 sc->bge_asf_count --; 3615 else { 3616 sc->bge_asf_count = 2; 3617 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 3618 BGE_FW_DRV_ALIVE); 3619 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 3620 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 3621 CSR_WRITE_4(sc, BGE_CPU_EVENT, 3622 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 3623 } 3624 } 3625 } 3626 3627 static void 3628 bge_tick(void *xsc) 3629 { 3630 struct bge_softc *sc = xsc; 3631 struct mii_data *mii = &sc->bge_mii; 3632 int s; 3633 3634 s = splnet(); 3635 3636 if (BGE_IS_5705_PLUS(sc)) 3637 bge_stats_update_regs(sc); 3638 else 3639 bge_stats_update(sc); 3640 3641 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 3642 /* 3643 * Since in TBI mode auto-polling can't be used we should poll 3644 * link status manually. Here we register pending link event 3645 * and trigger interrupt. 3646 */ 3647 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 3648 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3649 } else { 3650 /* 3651 * Do not touch PHY if we have link up. This could break 3652 * IPMI/ASF mode or produce extra input errors. 3653 * (extra input errors was reported for bcm5701 & bcm5704). 3654 */ 3655 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) 3656 mii_tick(mii); 3657 } 3658 3659 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 3660 3661 splx(s); 3662 } 3663 3664 static void 3665 bge_stats_update_regs(struct bge_softc *sc) 3666 { 3667 struct ifnet *ifp = &sc->ethercom.ec_if; 3668 3669 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 3670 offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 3671 3672 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3673 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 3674 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 3675 } 3676 3677 static void 3678 bge_stats_update(struct bge_softc *sc) 3679 { 3680 struct ifnet *ifp = &sc->ethercom.ec_if; 3681 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3682 3683 #define READ_STAT(sc, stats, stat) \ 3684 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3685 3686 ifp->if_collisions += 3687 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) + 3688 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) + 3689 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) + 3690 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) - 3691 ifp->if_collisions; 3692 3693 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff, 3694 READ_STAT(sc, stats, outXoffSent.bge_addr_lo)); 3695 BGE_EVCNT_UPD(sc->bge_ev_tx_xon, 3696 READ_STAT(sc, stats, outXonSent.bge_addr_lo)); 3697 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff, 3698 READ_STAT(sc, stats, 3699 xoffPauseFramesReceived.bge_addr_lo)); 3700 BGE_EVCNT_UPD(sc->bge_ev_rx_xon, 3701 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo)); 3702 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl, 3703 READ_STAT(sc, stats, 3704 macControlFramesReceived.bge_addr_lo)); 3705 BGE_EVCNT_UPD(sc->bge_ev_xoffentered, 3706 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo)); 3707 3708 #undef READ_STAT 3709 3710 #ifdef notdef 3711 ifp->if_collisions += 3712 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + 3713 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + 3714 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + 3715 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - 3716 ifp->if_collisions; 3717 #endif 3718 } 3719 3720 /* 3721 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3722 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3723 * but when such padded frames employ the bge IP/TCP checksum offload, 3724 * the hardware checksum assist gives incorrect results (possibly 3725 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3726 * If we pad such runts with zeros, the onboard checksum comes out correct. 3727 */ 3728 static inline int 3729 bge_cksum_pad(struct mbuf *pkt) 3730 { 3731 struct mbuf *last = NULL; 3732 int padlen; 3733 3734 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len; 3735 3736 /* if there's only the packet-header and we can pad there, use it. */ 3737 if (pkt->m_pkthdr.len == pkt->m_len && 3738 M_TRAILINGSPACE(pkt) >= padlen) { 3739 last = pkt; 3740 } else { 3741 /* 3742 * Walk packet chain to find last mbuf. We will either 3743 * pad there, or append a new mbuf and pad it 3744 * (thus perhaps avoiding the bcm5700 dma-min bug). 3745 */ 3746 for (last = pkt; last->m_next != NULL; last = last->m_next) { 3747 continue; /* do nothing */ 3748 } 3749 3750 /* `last' now points to last in chain. */ 3751 if (M_TRAILINGSPACE(last) < padlen) { 3752 /* Allocate new empty mbuf, pad it. Compact later. */ 3753 struct mbuf *n; 3754 MGET(n, M_DONTWAIT, MT_DATA); 3755 if (n == NULL) 3756 return ENOBUFS; 3757 n->m_len = 0; 3758 last->m_next = n; 3759 last = n; 3760 } 3761 } 3762 3763 KDASSERT(!M_READONLY(last)); 3764 KDASSERT(M_TRAILINGSPACE(last) >= padlen); 3765 3766 /* Now zero the pad area, to avoid the bge cksum-assist bug */ 3767 memset(mtod(last, char *) + last->m_len, 0, padlen); 3768 last->m_len += padlen; 3769 pkt->m_pkthdr.len += padlen; 3770 return 0; 3771 } 3772 3773 /* 3774 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes. 3775 */ 3776 static inline int 3777 bge_compact_dma_runt(struct mbuf *pkt) 3778 { 3779 struct mbuf *m, *prev; 3780 int totlen, prevlen; 3781 3782 prev = NULL; 3783 totlen = 0; 3784 prevlen = -1; 3785 3786 for (m = pkt; m != NULL; prev = m,m = m->m_next) { 3787 int mlen = m->m_len; 3788 int shortfall = 8 - mlen ; 3789 3790 totlen += mlen; 3791 if (mlen == 0) { 3792 continue; 3793 } 3794 if (mlen >= 8) 3795 continue; 3796 3797 /* If we get here, mbuf data is too small for DMA engine. 3798 * Try to fix by shuffling data to prev or next in chain. 3799 * If that fails, do a compacting deep-copy of the whole chain. 3800 */ 3801 3802 /* Internal frag. If fits in prev, copy it there. */ 3803 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) { 3804 memcpy(prev->m_data + prev->m_len, m->m_data, mlen); 3805 prev->m_len += mlen; 3806 m->m_len = 0; 3807 /* XXX stitch chain */ 3808 prev->m_next = m_free(m); 3809 m = prev; 3810 continue; 3811 } 3812 else if (m->m_next != NULL && 3813 M_TRAILINGSPACE(m) >= shortfall && 3814 m->m_next->m_len >= (8 + shortfall)) { 3815 /* m is writable and have enough data in next, pull up. */ 3816 3817 memcpy(m->m_data + m->m_len, m->m_next->m_data, 3818 shortfall); 3819 m->m_len += shortfall; 3820 m->m_next->m_len -= shortfall; 3821 m->m_next->m_data += shortfall; 3822 } 3823 else if (m->m_next == NULL || 1) { 3824 /* Got a runt at the very end of the packet. 3825 * borrow data from the tail of the preceding mbuf and 3826 * update its length in-place. (The original data is still 3827 * valid, so we can do this even if prev is not writable.) 3828 */ 3829 3830 /* if we'd make prev a runt, just move all of its data. */ 3831 KASSERT(prev != NULL /*, ("runt but null PREV")*/); 3832 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/); 3833 3834 if ((prev->m_len - shortfall) < 8) 3835 shortfall = prev->m_len; 3836 3837 #ifdef notyet /* just do the safe slow thing for now */ 3838 if (!M_READONLY(m)) { 3839 if (M_LEADINGSPACE(m) < shorfall) { 3840 void *m_dat; 3841 m_dat = (m->m_flags & M_PKTHDR) ? 3842 m->m_pktdat : m->dat; 3843 memmove(m_dat, mtod(m, void*), m->m_len); 3844 m->m_data = m_dat; 3845 } 3846 } else 3847 #endif /* just do the safe slow thing */ 3848 { 3849 struct mbuf * n = NULL; 3850 int newprevlen = prev->m_len - shortfall; 3851 3852 MGET(n, M_NOWAIT, MT_DATA); 3853 if (n == NULL) 3854 return ENOBUFS; 3855 KASSERT(m->m_len + shortfall < MLEN 3856 /*, 3857 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/); 3858 3859 /* first copy the data we're stealing from prev */ 3860 memcpy(n->m_data, prev->m_data + newprevlen, 3861 shortfall); 3862 3863 /* update prev->m_len accordingly */ 3864 prev->m_len -= shortfall; 3865 3866 /* copy data from runt m */ 3867 memcpy(n->m_data + shortfall, m->m_data, 3868 m->m_len); 3869 3870 /* n holds what we stole from prev, plus m */ 3871 n->m_len = shortfall + m->m_len; 3872 3873 /* stitch n into chain and free m */ 3874 n->m_next = m->m_next; 3875 prev->m_next = n; 3876 /* KASSERT(m->m_next == NULL); */ 3877 m->m_next = NULL; 3878 m_free(m); 3879 m = n; /* for continuing loop */ 3880 } 3881 } 3882 prevlen = m->m_len; 3883 } 3884 return 0; 3885 } 3886 3887 /* 3888 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3889 * pointers to descriptors. 3890 */ 3891 static int 3892 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx) 3893 { 3894 struct bge_tx_bd *f = NULL; 3895 uint32_t frag, cur; 3896 uint16_t csum_flags = 0; 3897 uint16_t txbd_tso_flags = 0; 3898 struct txdmamap_pool_entry *dma; 3899 bus_dmamap_t dmamap; 3900 int i = 0; 3901 struct m_tag *mtag; 3902 int use_tso, maxsegsize, error; 3903 3904 cur = frag = *txidx; 3905 3906 if (m_head->m_pkthdr.csum_flags) { 3907 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4) 3908 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3909 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) 3910 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3911 } 3912 3913 /* 3914 * If we were asked to do an outboard checksum, and the NIC 3915 * has the bug where it sometimes adds in the Ethernet padding, 3916 * explicitly pad with zeros so the cksum will be correct either way. 3917 * (For now, do this for all chip versions, until newer 3918 * are confirmed to not require the workaround.) 3919 */ 3920 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 || 3921 #ifdef notyet 3922 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || 3923 #endif 3924 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD) 3925 goto check_dma_bug; 3926 3927 if (bge_cksum_pad(m_head) != 0) 3928 return ENOBUFS; 3929 3930 check_dma_bug: 3931 if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)) 3932 goto doit; 3933 3934 /* 3935 * bcm5700 Revision B silicon cannot handle DMA descriptors with 3936 * less than eight bytes. If we encounter a teeny mbuf 3937 * at the end of a chain, we can pad. Otherwise, copy. 3938 */ 3939 if (bge_compact_dma_runt(m_head) != 0) 3940 return ENOBUFS; 3941 3942 doit: 3943 dma = SLIST_FIRST(&sc->txdma_list); 3944 if (dma == NULL) 3945 return ENOBUFS; 3946 dmamap = dma->dmamap; 3947 3948 /* 3949 * Set up any necessary TSO state before we start packing... 3950 */ 3951 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 3952 if (!use_tso) { 3953 maxsegsize = 0; 3954 } else { /* TSO setup */ 3955 unsigned mss; 3956 struct ether_header *eh; 3957 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 3958 struct mbuf * m0 = m_head; 3959 struct ip *ip; 3960 struct tcphdr *th; 3961 int iphl, hlen; 3962 3963 /* 3964 * XXX It would be nice if the mbuf pkthdr had offset 3965 * fields for the protocol headers. 3966 */ 3967 3968 eh = mtod(m0, struct ether_header *); 3969 switch (htons(eh->ether_type)) { 3970 case ETHERTYPE_IP: 3971 offset = ETHER_HDR_LEN; 3972 break; 3973 3974 case ETHERTYPE_VLAN: 3975 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3976 break; 3977 3978 default: 3979 /* 3980 * Don't support this protocol or encapsulation. 3981 */ 3982 return ENOBUFS; 3983 } 3984 3985 /* 3986 * TCP/IP headers are in the first mbuf; we can do 3987 * this the easy way. 3988 */ 3989 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 3990 hlen = iphl + offset; 3991 if (__predict_false(m0->m_len < 3992 (hlen + sizeof(struct tcphdr)))) { 3993 3994 aprint_debug_dev(sc->bge_dev, 3995 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 3996 "not handled yet\n", 3997 m0->m_len, hlen+ sizeof(struct tcphdr)); 3998 #ifdef NOTYET 3999 /* 4000 * XXX jonathan@NetBSD.org: untested. 4001 * how to force this branch to be taken? 4002 */ 4003 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 4004 4005 m_copydata(m0, offset, sizeof(ip), &ip); 4006 m_copydata(m0, hlen, sizeof(th), &th); 4007 4008 ip.ip_len = 0; 4009 4010 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 4011 sizeof(ip.ip_len), &ip.ip_len); 4012 4013 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 4014 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 4015 4016 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 4017 sizeof(th.th_sum), &th.th_sum); 4018 4019 hlen += th.th_off << 2; 4020 iptcp_opt_words = hlen; 4021 #else 4022 /* 4023 * if_wm "hard" case not yet supported, can we not 4024 * mandate it out of existence? 4025 */ 4026 (void) ip; (void)th; (void) ip_tcp_hlen; 4027 4028 return ENOBUFS; 4029 #endif 4030 } else { 4031 ip = (struct ip *) (mtod(m0, char *) + offset); 4032 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 4033 ip_tcp_hlen = iphl + (th->th_off << 2); 4034 4035 /* Total IP/TCP options, in 32-bit words */ 4036 iptcp_opt_words = (ip_tcp_hlen 4037 - sizeof(struct tcphdr) 4038 - sizeof(struct ip)) >> 2; 4039 } 4040 if (BGE_IS_5750_OR_BEYOND(sc)) { 4041 th->th_sum = 0; 4042 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 4043 } else { 4044 /* 4045 * XXX jonathan@NetBSD.org: 5705 untested. 4046 * Requires TSO firmware patch for 5701/5703/5704. 4047 */ 4048 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 4049 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 4050 } 4051 4052 mss = m_head->m_pkthdr.segsz; 4053 txbd_tso_flags |= 4054 BGE_TXBDFLAG_CPU_PRE_DMA | 4055 BGE_TXBDFLAG_CPU_POST_DMA; 4056 4057 /* 4058 * Our NIC TSO-assist assumes TSO has standard, optionless 4059 * IPv4 and TCP headers, which total 40 bytes. By default, 4060 * the NIC copies 40 bytes of IP/TCP header from the 4061 * supplied header into the IP/TCP header portion of 4062 * each post-TSO-segment. If the supplied packet has IP or 4063 * TCP options, we need to tell the NIC to copy those extra 4064 * bytes into each post-TSO header, in addition to the normal 4065 * 40-byte IP/TCP header (and to leave space accordingly). 4066 * Unfortunately, the driver encoding of option length 4067 * varies across different ASIC families. 4068 */ 4069 tcp_seg_flags = 0; 4070 if (iptcp_opt_words) { 4071 if (BGE_IS_5705_PLUS(sc)) { 4072 tcp_seg_flags = 4073 iptcp_opt_words << 11; 4074 } else { 4075 txbd_tso_flags |= 4076 iptcp_opt_words << 12; 4077 } 4078 } 4079 maxsegsize = mss | tcp_seg_flags; 4080 ip->ip_len = htons(mss + ip_tcp_hlen); 4081 4082 } /* TSO setup */ 4083 4084 /* 4085 * Start packing the mbufs in this chain into 4086 * the fragment pointers. Stop when we run out 4087 * of fragments or hit the end of the mbuf chain. 4088 */ 4089 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head, 4090 BUS_DMA_NOWAIT); 4091 if (error) 4092 return ENOBUFS; 4093 /* 4094 * Sanity check: avoid coming within 16 descriptors 4095 * of the end of the ring. 4096 */ 4097 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 4098 BGE_TSO_PRINTF(("%s: " 4099 " dmamap_load_mbuf too close to ring wrap\n", 4100 device_xname(sc->bge_dev))); 4101 goto fail_unload; 4102 } 4103 4104 mtag = sc->ethercom.ec_nvlans ? 4105 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL; 4106 4107 4108 /* Iterate over dmap-map fragments. */ 4109 for (i = 0; i < dmamap->dm_nsegs; i++) { 4110 f = &sc->bge_rdata->bge_tx_ring[frag]; 4111 if (sc->bge_cdata.bge_tx_chain[frag] != NULL) 4112 break; 4113 4114 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr); 4115 f->bge_len = dmamap->dm_segs[i].ds_len; 4116 4117 /* 4118 * For 5751 and follow-ons, for TSO we must turn 4119 * off checksum-assist flag in the tx-descr, and 4120 * supply the ASIC-revision-specific encoding 4121 * of TSO flags and segsize. 4122 */ 4123 if (use_tso) { 4124 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) { 4125 f->bge_rsvd = maxsegsize; 4126 f->bge_flags = csum_flags | txbd_tso_flags; 4127 } else { 4128 f->bge_rsvd = 0; 4129 f->bge_flags = 4130 (csum_flags | txbd_tso_flags) & 0x0fff; 4131 } 4132 } else { 4133 f->bge_rsvd = 0; 4134 f->bge_flags = csum_flags; 4135 } 4136 4137 if (mtag != NULL) { 4138 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 4139 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 4140 } else { 4141 f->bge_vlan_tag = 0; 4142 } 4143 cur = frag; 4144 BGE_INC(frag, BGE_TX_RING_CNT); 4145 } 4146 4147 if (i < dmamap->dm_nsegs) { 4148 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n", 4149 device_xname(sc->bge_dev), i, dmamap->dm_nsegs)); 4150 goto fail_unload; 4151 } 4152 4153 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize, 4154 BUS_DMASYNC_PREWRITE); 4155 4156 if (frag == sc->bge_tx_saved_considx) { 4157 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n", 4158 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx)); 4159 4160 goto fail_unload; 4161 } 4162 4163 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END; 4164 sc->bge_cdata.bge_tx_chain[cur] = m_head; 4165 SLIST_REMOVE_HEAD(&sc->txdma_list, link); 4166 sc->txdma[cur] = dma; 4167 sc->bge_txcnt += dmamap->dm_nsegs; 4168 4169 *txidx = frag; 4170 4171 return 0; 4172 4173 fail_unload: 4174 bus_dmamap_unload(sc->bge_dmatag, dmamap); 4175 4176 return ENOBUFS; 4177 } 4178 4179 /* 4180 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 4181 * to the mbuf data regions directly in the transmit descriptors. 4182 */ 4183 static void 4184 bge_start(struct ifnet *ifp) 4185 { 4186 struct bge_softc *sc; 4187 struct mbuf *m_head = NULL; 4188 uint32_t prodidx; 4189 int pkts = 0; 4190 4191 sc = ifp->if_softc; 4192 4193 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 4194 return; 4195 4196 prodidx = sc->bge_tx_prodidx; 4197 4198 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 4199 IFQ_POLL(&ifp->if_snd, m_head); 4200 if (m_head == NULL) 4201 break; 4202 4203 #if 0 4204 /* 4205 * XXX 4206 * safety overkill. If this is a fragmented packet chain 4207 * with delayed TCP/UDP checksums, then only encapsulate 4208 * it if we have enough descriptors to handle the entire 4209 * chain at once. 4210 * (paranoia -- may not actually be needed) 4211 */ 4212 if (m_head->m_flags & M_FIRSTFRAG && 4213 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 4214 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 4215 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 4216 ifp->if_flags |= IFF_OACTIVE; 4217 break; 4218 } 4219 } 4220 #endif 4221 4222 /* 4223 * Pack the data into the transmit ring. If we 4224 * don't have room, set the OACTIVE flag and wait 4225 * for the NIC to drain the ring. 4226 */ 4227 if (bge_encap(sc, m_head, &prodidx)) { 4228 ifp->if_flags |= IFF_OACTIVE; 4229 break; 4230 } 4231 4232 /* now we are committed to transmit the packet */ 4233 IFQ_DEQUEUE(&ifp->if_snd, m_head); 4234 pkts++; 4235 4236 /* 4237 * If there's a BPF listener, bounce a copy of this frame 4238 * to him. 4239 */ 4240 if (ifp->if_bpf) 4241 bpf_ops->bpf_mtap(ifp->if_bpf, m_head); 4242 } 4243 if (pkts == 0) 4244 return; 4245 4246 /* Transmit */ 4247 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 4248 /* 5700 b2 errata */ 4249 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 4250 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 4251 4252 sc->bge_tx_prodidx = prodidx; 4253 4254 /* 4255 * Set a timeout in case the chip goes out to lunch. 4256 */ 4257 ifp->if_timer = 5; 4258 } 4259 4260 static int 4261 bge_init(struct ifnet *ifp) 4262 { 4263 struct bge_softc *sc = ifp->if_softc; 4264 const uint16_t *m; 4265 int s, error = 0; 4266 4267 s = splnet(); 4268 4269 ifp = &sc->ethercom.ec_if; 4270 4271 /* Cancel pending I/O and flush buffers. */ 4272 bge_stop(ifp, 0); 4273 4274 bge_stop_fw(sc); 4275 bge_sig_pre_reset(sc, BGE_RESET_START); 4276 bge_reset(sc); 4277 bge_sig_legacy(sc, BGE_RESET_START); 4278 bge_sig_post_reset(sc, BGE_RESET_START); 4279 4280 bge_chipinit(sc); 4281 4282 /* 4283 * Init the various state machines, ring 4284 * control blocks and firmware. 4285 */ 4286 error = bge_blockinit(sc); 4287 if (error != 0) { 4288 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 4289 error); 4290 splx(s); 4291 return error; 4292 } 4293 4294 ifp = &sc->ethercom.ec_if; 4295 4296 /* Specify MTU. */ 4297 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 4298 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 4299 4300 /* Load our MAC address. */ 4301 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 4302 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 4303 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 4304 4305 /* Enable or disable promiscuous mode as needed. */ 4306 if (ifp->if_flags & IFF_PROMISC) 4307 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4308 else 4309 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4310 4311 /* Program multicast filter. */ 4312 bge_setmulti(sc); 4313 4314 /* Init RX ring. */ 4315 bge_init_rx_ring_std(sc); 4316 4317 /* 4318 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 4319 * memory to insure that the chip has in fact read the first 4320 * entry of the ring. 4321 */ 4322 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 4323 uint32_t v, i; 4324 for (i = 0; i < 10; i++) { 4325 DELAY(20); 4326 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 4327 if (v == (MCLBYTES - ETHER_ALIGN)) 4328 break; 4329 } 4330 if (i == 10) 4331 aprint_error_dev(sc->bge_dev, 4332 "5705 A0 chip failed to load RX ring\n"); 4333 } 4334 4335 /* Init jumbo RX ring. */ 4336 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 4337 bge_init_rx_ring_jumbo(sc); 4338 4339 /* Init our RX return ring index */ 4340 sc->bge_rx_saved_considx = 0; 4341 4342 /* Init TX ring. */ 4343 bge_init_tx_ring(sc); 4344 4345 /* Turn on transmitter */ 4346 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 4347 4348 /* Turn on receiver */ 4349 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4350 4351 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 4352 4353 /* Tell firmware we're alive. */ 4354 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4355 4356 /* Enable host interrupts. */ 4357 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 4358 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4359 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 4360 4361 if ((error = bge_ifmedia_upd(ifp)) != 0) 4362 goto out; 4363 4364 ifp->if_flags |= IFF_RUNNING; 4365 ifp->if_flags &= ~IFF_OACTIVE; 4366 4367 callout_reset(&sc->bge_timeout, hz, bge_tick, sc); 4368 4369 out: 4370 splx(s); 4371 4372 return error; 4373 } 4374 4375 /* 4376 * Set media options. 4377 */ 4378 static int 4379 bge_ifmedia_upd(struct ifnet *ifp) 4380 { 4381 struct bge_softc *sc = ifp->if_softc; 4382 struct mii_data *mii = &sc->bge_mii; 4383 struct ifmedia *ifm = &sc->bge_ifmedia; 4384 int rc; 4385 4386 /* If this is a 1000baseX NIC, enable the TBI port. */ 4387 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4388 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 4389 return EINVAL; 4390 switch (IFM_SUBTYPE(ifm->ifm_media)) { 4391 case IFM_AUTO: 4392 /* 4393 * The BCM5704 ASIC appears to have a special 4394 * mechanism for programming the autoneg 4395 * advertisement registers in TBI mode. 4396 */ 4397 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) { 4398 uint32_t sgdig; 4399 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 4400 if (sgdig & BGE_SGDIGSTS_DONE) { 4401 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4402 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4403 sgdig |= BGE_SGDIGCFG_AUTO | 4404 BGE_SGDIGCFG_PAUSE_CAP | 4405 BGE_SGDIGCFG_ASYM_PAUSE; 4406 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4407 sgdig | BGE_SGDIGCFG_SEND); 4408 DELAY(5); 4409 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4410 } 4411 } 4412 break; 4413 case IFM_1000_SX: 4414 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 4415 BGE_CLRBIT(sc, BGE_MAC_MODE, 4416 BGE_MACMODE_HALF_DUPLEX); 4417 } else { 4418 BGE_SETBIT(sc, BGE_MAC_MODE, 4419 BGE_MACMODE_HALF_DUPLEX); 4420 } 4421 break; 4422 default: 4423 return EINVAL; 4424 } 4425 /* XXX 802.3x flow control for 1000BASE-SX */ 4426 return 0; 4427 } 4428 4429 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT); 4430 if ((rc = mii_mediachg(mii)) == ENXIO) 4431 return 0; 4432 4433 /* 4434 * Force an interrupt so that we will call bge_link_upd 4435 * if needed and clear any pending link state attention. 4436 * Without this we are not getting any further interrupts 4437 * for link state changes and thus will not UP the link and 4438 * not be able to send in bge_start. The only way to get 4439 * things working was to receive a packet and get a RX intr. 4440 */ 4441 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4442 sc->bge_flags & BGE_IS_5788) 4443 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4444 else 4445 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4446 4447 return rc; 4448 } 4449 4450 /* 4451 * Report current media status. 4452 */ 4453 static void 4454 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4455 { 4456 struct bge_softc *sc = ifp->if_softc; 4457 struct mii_data *mii = &sc->bge_mii; 4458 4459 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4460 ifmr->ifm_status = IFM_AVALID; 4461 ifmr->ifm_active = IFM_ETHER; 4462 if (CSR_READ_4(sc, BGE_MAC_STS) & 4463 BGE_MACSTAT_TBI_PCS_SYNCHED) 4464 ifmr->ifm_status |= IFM_ACTIVE; 4465 ifmr->ifm_active |= IFM_1000_SX; 4466 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 4467 ifmr->ifm_active |= IFM_HDX; 4468 else 4469 ifmr->ifm_active |= IFM_FDX; 4470 return; 4471 } 4472 4473 mii_pollstat(mii); 4474 ifmr->ifm_status = mii->mii_media_status; 4475 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4476 sc->bge_flowflags; 4477 } 4478 4479 static int 4480 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 4481 { 4482 struct bge_softc *sc = ifp->if_softc; 4483 struct ifreq *ifr = (struct ifreq *) data; 4484 int s, error = 0; 4485 struct mii_data *mii; 4486 4487 s = splnet(); 4488 4489 switch (command) { 4490 case SIOCSIFFLAGS: 4491 if ((error = ifioctl_common(ifp, command, data)) != 0) 4492 break; 4493 if (ifp->if_flags & IFF_UP) { 4494 /* 4495 * If only the state of the PROMISC flag changed, 4496 * then just use the 'set promisc mode' command 4497 * instead of reinitializing the entire NIC. Doing 4498 * a full re-init means reloading the firmware and 4499 * waiting for it to start up, which may take a 4500 * second or two. 4501 */ 4502 if (ifp->if_flags & IFF_RUNNING && 4503 ifp->if_flags & IFF_PROMISC && 4504 !(sc->bge_if_flags & IFF_PROMISC)) { 4505 BGE_SETBIT(sc, BGE_RX_MODE, 4506 BGE_RXMODE_RX_PROMISC); 4507 } else if (ifp->if_flags & IFF_RUNNING && 4508 !(ifp->if_flags & IFF_PROMISC) && 4509 sc->bge_if_flags & IFF_PROMISC) { 4510 BGE_CLRBIT(sc, BGE_RX_MODE, 4511 BGE_RXMODE_RX_PROMISC); 4512 } else if (!(sc->bge_if_flags & IFF_UP)) 4513 bge_init(ifp); 4514 } else { 4515 if (ifp->if_flags & IFF_RUNNING) 4516 bge_stop(ifp, 1); 4517 } 4518 sc->bge_if_flags = ifp->if_flags; 4519 error = 0; 4520 break; 4521 case SIOCSIFMEDIA: 4522 /* XXX Flow control is not supported for 1000BASE-SX */ 4523 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4524 ifr->ifr_media &= ~IFM_ETH_FMASK; 4525 sc->bge_flowflags = 0; 4526 } 4527 4528 /* Flow control requires full-duplex mode. */ 4529 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 4530 (ifr->ifr_media & IFM_FDX) == 0) { 4531 ifr->ifr_media &= ~IFM_ETH_FMASK; 4532 } 4533 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 4534 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 4535 /* We can do both TXPAUSE and RXPAUSE. */ 4536 ifr->ifr_media |= 4537 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 4538 } 4539 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 4540 } 4541 /* FALLTHROUGH */ 4542 case SIOCGIFMEDIA: 4543 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4544 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, 4545 command); 4546 } else { 4547 mii = &sc->bge_mii; 4548 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 4549 command); 4550 } 4551 break; 4552 default: 4553 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET) 4554 break; 4555 4556 error = 0; 4557 4558 if (command != SIOCADDMULTI && command != SIOCDELMULTI) 4559 ; 4560 else if (ifp->if_flags & IFF_RUNNING) 4561 bge_setmulti(sc); 4562 break; 4563 } 4564 4565 splx(s); 4566 4567 return error; 4568 } 4569 4570 static void 4571 bge_watchdog(struct ifnet *ifp) 4572 { 4573 struct bge_softc *sc; 4574 4575 sc = ifp->if_softc; 4576 4577 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n"); 4578 4579 ifp->if_flags &= ~IFF_RUNNING; 4580 bge_init(ifp); 4581 4582 ifp->if_oerrors++; 4583 } 4584 4585 static void 4586 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit) 4587 { 4588 int i; 4589 4590 BGE_CLRBIT(sc, reg, bit); 4591 4592 for (i = 0; i < 1000; i++) { 4593 if ((CSR_READ_4(sc, reg) & bit) == 0) 4594 return; 4595 delay(100); 4596 } 4597 4598 /* 4599 * Doesn't print only when the register is BGE_SRS_MODE. It occurs 4600 * on some environment (and once after boot?) 4601 */ 4602 if (reg != BGE_SRS_MODE) 4603 aprint_error_dev(sc->bge_dev, 4604 "block failed to stop: reg 0x%lx, bit 0x%08x\n", 4605 (u_long)reg, bit); 4606 } 4607 4608 /* 4609 * Stop the adapter and free any mbufs allocated to the 4610 * RX and TX lists. 4611 */ 4612 static void 4613 bge_stop(struct ifnet *ifp, int disable) 4614 { 4615 struct bge_softc *sc = ifp->if_softc; 4616 4617 callout_stop(&sc->bge_timeout); 4618 4619 /* 4620 * Tell firmware we're shutting down. 4621 */ 4622 bge_stop_fw(sc); 4623 bge_sig_pre_reset(sc, BGE_RESET_STOP); 4624 4625 /* Disable host interrupts. */ 4626 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4627 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 4628 4629 /* 4630 * Disable all of the receiver blocks 4631 */ 4632 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4633 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4634 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4635 if (BGE_IS_5700_FAMILY(sc)) 4636 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4637 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4638 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4639 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4640 4641 /* 4642 * Disable all of the transmit blocks 4643 */ 4644 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4645 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4646 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4647 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4648 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4649 if (BGE_IS_5700_FAMILY(sc)) 4650 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4651 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4652 4653 /* 4654 * Shut down all of the memory managers and related 4655 * state machines. 4656 */ 4657 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4658 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4659 if (BGE_IS_5700_FAMILY(sc)) 4660 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4661 4662 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4663 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4664 4665 if (BGE_IS_5700_FAMILY(sc)) { 4666 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4667 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4668 } 4669 4670 bge_reset(sc); 4671 bge_sig_legacy(sc, BGE_RESET_STOP); 4672 bge_sig_post_reset(sc, BGE_RESET_STOP); 4673 4674 /* 4675 * Keep the ASF firmware running if up. 4676 */ 4677 if (sc->bge_asf_mode & ASF_STACKUP) 4678 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4679 else 4680 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4681 4682 /* Free the RX lists. */ 4683 bge_free_rx_ring_std(sc); 4684 4685 /* Free jumbo RX list. */ 4686 if (BGE_IS_JUMBO_CAPABLE(sc)) 4687 bge_free_rx_ring_jumbo(sc); 4688 4689 /* Free TX buffers. */ 4690 bge_free_tx_ring(sc); 4691 4692 /* 4693 * Isolate/power down the PHY. 4694 */ 4695 if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) 4696 mii_down(&sc->bge_mii); 4697 4698 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4699 4700 /* Clear MAC's link state (PHY may still have link UP). */ 4701 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4702 4703 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4704 } 4705 4706 static void 4707 bge_link_upd(struct bge_softc *sc) 4708 { 4709 struct ifnet *ifp = &sc->ethercom.ec_if; 4710 struct mii_data *mii = &sc->bge_mii; 4711 uint32_t status; 4712 int link; 4713 4714 /* Clear 'pending link event' flag */ 4715 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 4716 4717 /* 4718 * Process link state changes. 4719 * Grrr. The link status word in the status block does 4720 * not work correctly on the BCM5700 rev AX and BX chips, 4721 * according to all available information. Hence, we have 4722 * to enable MII interrupts in order to properly obtain 4723 * async link changes. Unfortunately, this also means that 4724 * we have to read the MAC status register to detect link 4725 * changes, thereby adding an additional register access to 4726 * the interrupt handler. 4727 */ 4728 4729 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 4730 status = CSR_READ_4(sc, BGE_MAC_STS); 4731 if (status & BGE_MACSTAT_MI_INTERRUPT) { 4732 mii_pollstat(mii); 4733 4734 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4735 mii->mii_media_status & IFM_ACTIVE && 4736 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4737 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4738 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4739 (!(mii->mii_media_status & IFM_ACTIVE) || 4740 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4741 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4742 4743 /* Clear the interrupt */ 4744 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4745 BGE_EVTENB_MI_INTERRUPT); 4746 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4747 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 4748 BRGPHY_INTRS); 4749 } 4750 return; 4751 } 4752 4753 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4754 status = CSR_READ_4(sc, BGE_MAC_STS); 4755 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4756 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 4757 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4758 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 4759 BGE_CLRBIT(sc, BGE_MAC_MODE, 4760 BGE_MACMODE_TBI_SEND_CFGS); 4761 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4762 if_link_state_change(ifp, LINK_STATE_UP); 4763 } 4764 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 4765 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4766 if_link_state_change(ifp, LINK_STATE_DOWN); 4767 } 4768 /* 4769 * Discard link events for MII/GMII cards if MI auto-polling disabled. 4770 * This should not happen since mii callouts are locked now, but 4771 * we keep this check for debug. 4772 */ 4773 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 4774 /* 4775 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 4776 * bit in status word always set. Workaround this bug by 4777 * reading PHY link status directly. 4778 */ 4779 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 4780 BGE_STS_LINK : 0; 4781 4782 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 4783 mii_pollstat(mii); 4784 4785 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4786 mii->mii_media_status & IFM_ACTIVE && 4787 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4788 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4789 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4790 (!(mii->mii_media_status & IFM_ACTIVE) || 4791 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4792 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4793 } 4794 } 4795 4796 /* Clear the attention */ 4797 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 4798 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 4799 BGE_MACSTAT_LINK_CHANGED); 4800 } 4801 4802 static int 4803 sysctl_bge_verify(SYSCTLFN_ARGS) 4804 { 4805 int error, t; 4806 struct sysctlnode node; 4807 4808 node = *rnode; 4809 t = *(int*)rnode->sysctl_data; 4810 node.sysctl_data = &t; 4811 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 4812 if (error || newp == NULL) 4813 return error; 4814 4815 #if 0 4816 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 4817 node.sysctl_num, rnode->sysctl_num)); 4818 #endif 4819 4820 if (node.sysctl_num == bge_rxthresh_nodenum) { 4821 if (t < 0 || t >= NBGE_RX_THRESH) 4822 return EINVAL; 4823 bge_update_all_threshes(t); 4824 } else 4825 return EINVAL; 4826 4827 *(int*)rnode->sysctl_data = t; 4828 4829 return 0; 4830 } 4831 4832 /* 4833 * Set up sysctl(3) MIB, hw.bge.*. 4834 * 4835 * TBD condition SYSCTL_PERMANENT on being an LKM or not 4836 */ 4837 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup") 4838 { 4839 int rc, bge_root_num; 4840 const struct sysctlnode *node; 4841 4842 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 4843 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 4844 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 4845 goto err; 4846 } 4847 4848 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4849 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", 4850 SYSCTL_DESCR("BGE interface controls"), 4851 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 4852 goto err; 4853 } 4854 4855 bge_root_num = node->sysctl_num; 4856 4857 /* BGE Rx interrupt mitigation level */ 4858 if ((rc = sysctl_createv(clog, 0, NULL, &node, 4859 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4860 CTLTYPE_INT, "rx_lvl", 4861 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 4862 sysctl_bge_verify, 0, 4863 &bge_rx_thresh_lvl, 4864 0, CTL_HW, bge_root_num, CTL_CREATE, 4865 CTL_EOL)) != 0) { 4866 goto err; 4867 } 4868 4869 bge_rxthresh_nodenum = node->sysctl_num; 4870 4871 return; 4872 4873 err: 4874 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4875 } 4876 4877 #ifdef BGE_DEBUG 4878 void 4879 bge_debug_info(struct bge_softc *sc) 4880 { 4881 4882 printf("Hardware Flags:\n"); 4883 if (BGE_IS_5755_PLUS(sc)) 4884 printf(" - 5755 Plus\n"); 4885 if (BGE_IS_5750_OR_BEYOND(sc)) 4886 printf(" - 5750 Plus\n"); 4887 if (BGE_IS_5705_PLUS(sc)) 4888 printf(" - 5705 Plus\n"); 4889 if (BGE_IS_5714_FAMILY(sc)) 4890 printf(" - 5714 Family\n"); 4891 if (BGE_IS_5700_FAMILY(sc)) 4892 printf(" - 5700 Family\n"); 4893 if (sc->bge_flags & BGE_IS_5788) 4894 printf(" - 5788\n"); 4895 if (sc->bge_flags & BGE_JUMBO_CAPABLE) 4896 printf(" - Supports Jumbo Frames\n"); 4897 if (sc->bge_flags & BGE_NO_EEPROM) 4898 printf(" - No EEPROM\n"); 4899 if (sc->bge_flags & BGE_PCIX) 4900 printf(" - PCI-X Bus\n"); 4901 if (sc->bge_flags & BGE_PCIE) 4902 printf(" - PCI Express Bus\n"); 4903 if (sc->bge_flags & BGE_NO_3LED) 4904 printf(" - No 3 LEDs\n"); 4905 if (sc->bge_flags & BGE_RX_ALIGNBUG) 4906 printf(" - RX Alignment Bug\n"); 4907 if (sc->bge_flags & BGE_TSO) 4908 printf(" - TSO\n"); 4909 } 4910 #endif /* BGE_DEBUG */ 4911 4912 static int 4913 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 4914 { 4915 prop_dictionary_t dict; 4916 prop_data_t ea; 4917 4918 if ((sc->bge_flags & BGE_NO_EEPROM) == 0) 4919 return 1; 4920 4921 dict = device_properties(sc->bge_dev); 4922 ea = prop_dictionary_get(dict, "mac-address"); 4923 if (ea != NULL) { 4924 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 4925 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 4926 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 4927 return 0; 4928 } 4929 4930 return 1; 4931 } 4932 4933 static int 4934 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 4935 { 4936 uint32_t mac_addr; 4937 4938 mac_addr = bge_readmem_ind(sc, 0x0c14); 4939 if ((mac_addr >> 16) == 0x484b) { 4940 ether_addr[0] = (uint8_t)(mac_addr >> 8); 4941 ether_addr[1] = (uint8_t)mac_addr; 4942 mac_addr = bge_readmem_ind(sc, 0x0c18); 4943 ether_addr[2] = (uint8_t)(mac_addr >> 24); 4944 ether_addr[3] = (uint8_t)(mac_addr >> 16); 4945 ether_addr[4] = (uint8_t)(mac_addr >> 8); 4946 ether_addr[5] = (uint8_t)mac_addr; 4947 return 0; 4948 } 4949 return 1; 4950 } 4951 4952 static int 4953 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 4954 { 4955 int mac_offset = BGE_EE_MAC_OFFSET; 4956 4957 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 4958 mac_offset = BGE_EE_MAC_OFFSET_5906; 4959 4960 return (bge_read_nvram(sc, ether_addr, mac_offset + 2, 4961 ETHER_ADDR_LEN)); 4962 } 4963 4964 static int 4965 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 4966 { 4967 4968 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 4969 return 1; 4970 4971 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 4972 ETHER_ADDR_LEN)); 4973 } 4974 4975 static int 4976 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 4977 { 4978 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 4979 /* NOTE: Order is critical */ 4980 bge_get_eaddr_fw, 4981 bge_get_eaddr_mem, 4982 bge_get_eaddr_nvram, 4983 bge_get_eaddr_eeprom, 4984 NULL 4985 }; 4986 const bge_eaddr_fcn_t *func; 4987 4988 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 4989 if ((*func)(sc, eaddr) == 0) 4990 break; 4991 } 4992 return (*func == NULL ? ENXIO : 0); 4993 } 4994