/*	$NetBSD: if_bge.c,v 1.202 2012/09/17 11:54:36 tsutsui Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.202 2012/09/17 11:54:36 tsutsui Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <sys/rnd.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */

#include <net/bpf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>
#include <dev/pci/if_bgevar.h>

#include <prop/proplib.h>

#define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
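 *
 * (Rough arithmetic: 46 maximum-size frames of ~1500 bytes is about
 * 69 kbytes queued per mitigation window in each direction, so with
 * both directions mitigated the data plus deferred ACKs outstanding
 * can approach a 128 kbyte window, hence the larger suggested value.)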
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 16,   1 },	/* rx_max_bds = 1 disables interrupt mitigation */
	{ 32,   2 },
	{ 50,   4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static void bge_release_resources(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ifflags_cb(struct ethercom *);
static int bge_ioctl(struct ifnet *, u_long, void *);
static int bge_init(struct ifnet *);
static void bge_stop(struct ifnet *, int);
static void bge_watchdog(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, uint8_t *, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, void *, int, int);
static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, void *, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
	    bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_setpowerstate(struct bge_softc *, int);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static void bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(struct ifnet *);

#define BGE_RESET_START 1
#define BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static void bge_stop_fw(struct bge_softc *);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);
static void sysctl_bge_init(struct bge_softc *);
static int sysctl_bge_verify(SYSCTLFN_PROTO);

#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x; } while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
void	bge_debug_info(struct bge_softc *);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM.  We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1001,
	  "Altima AC1001 Gigabit Ethernet",
	},
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702,
	  "Broadcom BCM5702 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5702X,
	  "Broadcom BCM5702X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703X,
	  "Broadcom BCM5703X Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5703_ALT,
	  "Broadcom BCM5703 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704C,
	  "Broadcom BCM5704C Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5704S,
	  "Broadcom BCM5704S Dual Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705,
	  "Broadcom BCM5705 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705F,
	  "Broadcom BCM5705F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705K,
	  "Broadcom BCM5705K Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
	  "Broadcom BCM5705M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714,
	  "Broadcom BCM5714 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5714S,
	  "Broadcom BCM5714S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715,
	  "Broadcom BCM5715 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5715S,
	  "Broadcom BCM5715S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5717,
	  "Broadcom BCM5717 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5718,
	  "Broadcom BCM5718 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5720,
	  "Broadcom BCM5720 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5721,
	  "Broadcom BCM5721 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5722,
	  "Broadcom BCM5722 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5723,
	  "Broadcom BCM5723 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5724,
	  "Broadcom BCM5724 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750,
	  "Broadcom BCM5750 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5750M,
	  "Broadcom BCM5750M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751,
	  "Broadcom BCM5751 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751F,
	  "Broadcom BCM5751F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5751M,
	  "Broadcom BCM5751M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752,
	  "Broadcom BCM5752 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5752M,
	  "Broadcom BCM5752M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753,
	  "Broadcom BCM5753 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753F,
	  "Broadcom BCM5753F Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5753M,
	  "Broadcom BCM5753M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754,
	  "Broadcom BCM5754 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5754M,
	  "Broadcom BCM5754M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755,
	  "Broadcom BCM5755 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5755M,
	  "Broadcom BCM5755M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5756,
	  "Broadcom BCM5756 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761,
	  "Broadcom BCM5761 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761E,
	  "Broadcom BCM5761E Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761S,
	  "Broadcom BCM5761S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5761SE,
	  "Broadcom BCM5761SE Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5764,
	  "Broadcom BCM5764 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780,
	  "Broadcom BCM5780 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5780S,
	  "Broadcom BCM5780S Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5781,
	  "Broadcom BCM5781 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5782,
	  "Broadcom BCM5782 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5784M,
	  "BCM5784M NetLink 1000baseT Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5786,
	  "Broadcom BCM5786 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787,
	  "Broadcom BCM5787 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5787M,
	  "Broadcom BCM5787M Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5788,
	  "Broadcom BCM5788 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5789,
	  "Broadcom BCM5789 Gigabit Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901,
	  "Broadcom BCM5901 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5901A2,
	  "Broadcom BCM5901A2 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5903M,
	  "Broadcom BCM5903M Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906,
	  "Broadcom BCM5906 Fast Ethernet",
	},
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5906M,
	  "Broadcom BCM5906M Fast Ethernet",
	},
"Broadcom BCM5906M Fast Ethernet", 557 }, 558 { PCI_VENDOR_BROADCOM, 559 PCI_PRODUCT_BROADCOM_BCM57760, 560 "Broadcom BCM57760 Fast Ethernet", 561 }, 562 { PCI_VENDOR_BROADCOM, 563 PCI_PRODUCT_BROADCOM_BCM57761, 564 "Broadcom BCM57761 Fast Ethernet", 565 }, 566 { PCI_VENDOR_BROADCOM, 567 PCI_PRODUCT_BROADCOM_BCM57762, 568 "Broadcom BCM57762 Gigabit Ethernet", 569 }, 570 { PCI_VENDOR_BROADCOM, 571 PCI_PRODUCT_BROADCOM_BCM57765, 572 "Broadcom BCM57765 Fast Ethernet", 573 }, 574 { PCI_VENDOR_BROADCOM, 575 PCI_PRODUCT_BROADCOM_BCM57780, 576 "Broadcom BCM57780 Fast Ethernet", 577 }, 578 { PCI_VENDOR_BROADCOM, 579 PCI_PRODUCT_BROADCOM_BCM57781, 580 "Broadcom BCM57781 Fast Ethernet", 581 }, 582 { PCI_VENDOR_BROADCOM, 583 PCI_PRODUCT_BROADCOM_BCM57785, 584 "Broadcom BCM57785 Fast Ethernet", 585 }, 586 { PCI_VENDOR_BROADCOM, 587 PCI_PRODUCT_BROADCOM_BCM57788, 588 "Broadcom BCM57788 Fast Ethernet", 589 }, 590 { PCI_VENDOR_BROADCOM, 591 PCI_PRODUCT_BROADCOM_BCM57790, 592 "Broadcom BCM57790 Fast Ethernet", 593 }, 594 { PCI_VENDOR_BROADCOM, 595 PCI_PRODUCT_BROADCOM_BCM57791, 596 "Broadcom BCM57791 Fast Ethernet", 597 }, 598 { PCI_VENDOR_BROADCOM, 599 PCI_PRODUCT_BROADCOM_BCM57795, 600 "Broadcom BCM57795 Fast Ethernet", 601 }, 602 { PCI_VENDOR_SCHNEIDERKOCH, 603 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1, 604 "SysKonnect SK-9Dx1 Gigabit Ethernet", 605 }, 606 { PCI_VENDOR_3COM, 607 PCI_PRODUCT_3COM_3C996, 608 "3Com 3c996 Gigabit Ethernet", 609 }, 610 { PCI_VENDOR_FUJITSU4, 611 PCI_PRODUCT_FUJITSU4_PW008GE4, 612 "Fujitsu PW008GE4 Gigabit Ethernet", 613 }, 614 { PCI_VENDOR_FUJITSU4, 615 PCI_PRODUCT_FUJITSU4_PW008GE5, 616 "Fujitsu PW008GE5 Gigabit Ethernet", 617 }, 618 { PCI_VENDOR_FUJITSU4, 619 PCI_PRODUCT_FUJITSU4_PP250_450_LAN, 620 "Fujitsu Primepower 250/450 Gigabit Ethernet", 621 }, 622 { 0, 623 0, 624 NULL }, 625 }; 626 627 /* 628 * XXX: how to handle variants based on 5750 and derivatives: 629 * 5750 5751, 5721, possibly 5714, 5752, and 5708?, which 630 * in general behave like a 5705, except with additional quirks. 631 * This driver's current handling of the 5721 is wrong; 632 * how we map ASIC revision to "quirks" needs more thought. 633 * (defined here until the thought is done). 
 */
#define BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_5700_FAMILY)
#define BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_5714_FAMILY)
#define BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_5705_PLUS)
#define BGE_IS_5750_OR_BEYOND(sc)	((sc)->bge_flags & BGE_5750_PLUS)
#define BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_5755_PLUS)
#define BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_JUMBO_CAPABLE)

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0, "BCM5702/5703 A0" },
	{ BGE_CHIPID_BCM5703_A1, "BCM5702/5703 A1" },
	{ BGE_CHIPID_BCM5703_A2, "BCM5702/5703 A2" },
	{ BGE_CHIPID_BCM5703_A3, "BCM5702/5703 A3" },
	{ BGE_CHIPID_BCM5703_B0, "BCM5702/5703 B0" },
	{ BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
	{ BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
	{ BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
	{ BGE_CHIPID_BCM5755_C0, "BCM5755 C0" },
	{ BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
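 * When an exact chip id is missing from bge_revisions[] above,
 * lookup code can fall back to matching just the ASIC major
 * revision against this table.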
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700, "unknown BCM5700" },
	{ BGE_ASICREV_BCM5701, "unknown BCM5701" },
	{ BGE_ASICREV_BCM5703, "unknown BCM5703" },
	{ BGE_ASICREV_BCM5704, "unknown BCM5704" },
	{ BGE_ASICREV_BCM5705, "unknown BCM5705" },
	{ BGE_ASICREV_BCM5750, "unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5752, "unknown BCM5752" },
	{ BGE_ASICREV_BCM5780, "unknown BCM5780" },
	{ BGE_ASICREV_BCM5714, "unknown BCM5714" },
	{ BGE_ASICREV_BCM5755, "unknown BCM5755" },
	{ BGE_ASICREV_BCM5761, "unknown BCM5761" },
	{ BGE_ASICREV_BCM5784, "unknown BCM5784" },
	{ BGE_ASICREV_BCM5785, "unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906, "unknown BCM5906" },
	{ BGE_ASICREV_BCM57780, "unknown BCM57780" },
	{ BGE_ASICREV_BCM5717, "unknown BCM5717" },
	{ BGE_ASICREV_BCM57765, "unknown BCM57765" },
	{ BGE_ASICREV_BCM57766, "unknown BCM57766" },

	{ 0, NULL }
};

static int bge_allow_asf = 1;

CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	pcireg_t val;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
}

/*
 * PCI Express only
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	pcireg_t val;

	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
	    + PCI_PCIE_DCSR);
	if ((val & PCI_PCIE_DCSR_MAX_READ_REQ) !=
	    BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
		aprint_verbose_dev(sc->bge_dev,
		    "adjust device control 0x%04x ", val);
		val &= ~PCI_PCIE_DCSR_MAX_READ_REQ;
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pciecap
		    + PCI_PCIE_DCSR, val);
		aprint_verbose("-> 0x%04x\n", val);
	}
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	return (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
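	/*
	 * The NVRAM is shared with on-chip firmware, so grab the
	 * software arbitration semaphore first: request SET1, then
	 * poll for the GNT1 grant bit for up to 8000 * 20us before
	 * giving up.
	 */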
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return 1;

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return 0;
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, uint8_t *dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
		return 1;

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'  The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
		return 1;
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return 0;
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;
	char *dest = destv;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t val;
	uint32_t autopoll;
	int i;

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.
	 * On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return 0;

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return 0;

	return (val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = device_private(dev);
	uint32_t autopoll;
	int i;

	if (phy != 1)
		return;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return;

	/* Touching the PHY with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		BGE_STS_CLRBIT(sc, BGE_STS_AUTOPOLL);
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		delay(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			delay(5);
			CSR_READ_4(sc, BGE_MI_COMM);
			break;
		}
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL);
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		delay(40);
	}

	if (i == BGE_TIMEOUT)
		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
}

static void
bge_miibus_statchg(struct ifnet *ifp)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
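	 * When the link was autonegotiated, latch the resolved pause
	 * settings into sc->bge_flowflags and strip them from
	 * mii_media_active, so later media-status reporting sees only
	 * the media bits.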
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);

	if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	else
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/*
	 * For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds.  jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);
}

/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
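	/*
	 * Allocate BGE_JMEM bytes of DMA-safe memory as a single
	 * contiguous segment, map it into kernel virtual space, and
	 * then (below) carve it into BGE_JSLOTS buffers of BGE_JLEN
	 * bytes each.
	 */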
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev,
		    "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (void *)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "no memory for jumbo buffer queue!\n");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind only the steps that completed. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
		return NULL;
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return (sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* Calculate the slot this buffer belongs to. */
	i = ((char *)buf
	    - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return ENOBUFS;
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return ENOBUFS;
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	void *buf = NULL;

	if (m == NULL) {
		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return ENOBUFS;

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			aprint_error_dev(sc->bge_dev,
			    "jumbo allocation failed -- packet dropped!\n");
			return ENOBUFS;
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
		m_adj(m_new, ETHER_ALIGN);
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
	    i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return 0;
}

/*
 * The standard receive ring has 512 entries in it.  At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot.  For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_std = i - 1;
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return 0;
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return 0;
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
		bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return ENOBUFS;
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			aprint_error_dev(sc->bge_dev,
			    "can't alloc txdmamap_pool_entry\n");
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return ENOMEM;
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return 0;
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	uint32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
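		/*
		 * The 7-bit hash indexes a 128-bit filter implemented
		 * as four 32-bit registers: bits 6:5 select the
		 * register and bits 4:0 select the bit within it.
		 */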
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

 setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{
	/*
	 * Some chips don't like this so only do this if ASF is enabled
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

static int
bge_poll_fw(struct bge_softc *sc)
{
	uint32_t val;
	int i;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = CSR_READ_4(sc, BGE_VCPU_STATUS);
			if (val & BGE_VCPU_STATUS_INIT_DONE)
				break;
			DELAY(100);
		}
		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev, "reset timed out\n");
			return -1;
		}
	} else if ((sc->bge_flags & BGE_NO_EEPROM) == 0) {
		/*
		 * Poll the value location we just wrote until
		 * we see the 1's complement of the magic number.
		 * This indicates that the firmware initialization
		 * is complete.
		 * XXX 1000ms for Flash and 10000ms for SEEPROM.
		 */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
			if (val == ~BGE_MAGIC_NUMBER)
				break;
			DELAY(10);
		}

		if (i >= BGE_TIMEOUT) {
			aprint_error_dev(sc->bge_dev,
			    "firmware handshake timed out, val = %x\n", val);
			return -1;
		}
	}

	return 0;
}

/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	int i;
	uint32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
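	/*
	 * BGE_INIT loads the PCI misc host control register with the
	 * byte-swap options (and related host-interface settings)
	 * that the rest of the driver assumes for register and DMA
	 * accesses.
	 */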
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);

	/* Set up the PCI DMA control register. */
	dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD;
	if (sc->bge_flags & BGE_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_PCIX) {
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
		    device_xname(sc->bge_dev)));
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= (0x02 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x02 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);

			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
			else
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |=
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= (0x03 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x03 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);
		}

		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
		    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONEDMA_ATONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    device_xname(sc->bge_dev)));
		dma_rw_ctl |= (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5705 &&
		    BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;

	if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
	    BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
	    dma_rw_ctl);

	/*
	 * Set up general mode register.
1881 */
1882 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
1883 BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1884 BGE_MODECTL_TX_NO_PHDR_CSUM);
1885
1886 /*
1887 * The BCM5701 B5 has a bug causing data corruption when using
1888 * 64-bit DMA reads, which can be terminated early and then
1889 * completed later as 32-bit accesses, in combination with
1890 * certain bridges.
1891 */
1892 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1893 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1894 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
1895
1896 /*
1897 * Tell the firmware the driver is running.
1898 */
1899 if (sc->bge_asf_mode & ASF_STACKUP)
1900 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
1901
1902 /*
1903 * Disable memory write invalidate. Apparently it is not supported
1904 * properly by these devices.
1905 */
1906 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG,
1907 PCI_COMMAND_INVALIDATE_ENABLE);
1908
1909 #ifdef __brokenalpha__
1910 /*
1911 * Must ensure that we do not cross an 8K (byte) boundary
1912 * for DMA reads. Our highest limit is 1K bytes. This is a
1913 * restriction on some ALPHA platforms with early revision
1914 * 21174 PCI chipsets, such as the AlphaPC 164lx.
1915 */
1916 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1917 #endif
1918
1919 /* Set the timer prescaler (always 66MHz) */
1920 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1921
1922 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1923 DELAY(40); /* XXX */
1924
1925 /* Put PHY into ready state */
1926 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1927 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1928 DELAY(40);
1929 }
1930
1931 return 0;
1932 }
1933
1934 static int
1935 bge_blockinit(struct bge_softc *sc)
1936 {
1937 volatile struct bge_rcb *rcb;
1938 bus_size_t rcb_addr;
1939 int i;
1940 struct ifnet *ifp = &sc->ethercom.ec_if;
1941 bge_hostaddr taddr;
1942 uint32_t val;
1943
1944 /*
1945 * Initialize the memory window pointer register so that
1946 * we can access the first 32K of internal NIC RAM. This will
1947 * allow us to set up the TX send ring RCBs and the RX return
1948 * ring RCBs, plus other things which live in NIC memory.
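 *
 * A minimal sketch of the window mechanism (assuming the usual
 * layout from if_bgereg.h, where a 32K window into NIC RAM
 * appears at BGE_MEMWIN_START within BAR0):
 *
 *	// select which 32K page of NIC RAM is visible
 *	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
 *	    BGE_PCI_MEMWIN_BASEADDR, off & ~0x7fff);
 *	// then access it through the BAR at the window offset
 *	val = bus_space_read_4(sc->bge_btag, sc->bge_bhandle,
 *	    BGE_MEMWIN_START + (off & 0x7fff));
 *
 * Writing 0 below therefore exposes NIC addresses 0x0000-0x7fff,
 * where the RCBs of interest live.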
1949 */ 1950 1951 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0); 1952 1953 /* Step 33: Configure mbuf memory pool */ 1954 if (BGE_IS_5700_FAMILY(sc)) { 1955 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, 1956 BGE_BUFFPOOL_1); 1957 1958 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 1959 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1960 else 1961 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 1962 1963 /* Configure DMA resource pool */ 1964 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 1965 BGE_DMA_DESCRIPTORS); 1966 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 1967 } 1968 1969 /* Step 35: Configure mbuf pool watermarks */ 1970 #ifdef ORIG_WPAUL_VALUES 1971 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24); 1972 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24); 1973 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48); 1974 #else 1975 1976 /* new broadcom docs strongly recommend these: */ 1977 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 1978 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 1979 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 1980 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1981 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); 1982 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); 1983 } else if (BGE_IS_5705_PLUS(sc)) { 1984 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 1985 1986 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 1987 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 1988 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 1989 } else { 1990 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 1991 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1992 } 1993 } else if (!BGE_IS_5705_PLUS(sc)) { 1994 if (ifp->if_mtu > ETHER_MAX_LEN) { 1995 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1996 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1997 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 1998 } else { 1999 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304); 2000 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152); 2001 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380); 2002 } 2003 } else { 2004 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 2005 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 2006 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 2007 } 2008 #endif 2009 2010 /* Step 36: Configure DMA resource watermarks */ 2011 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 2012 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 2013 2014 /* Step 38: Enable buffer manager */ 2015 CSR_WRITE_4(sc, BGE_BMAN_MODE, 2016 BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 2017 2018 /* Step 39: Poll for buffer manager start indication */ 2019 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2020 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 2021 break; 2022 DELAY(10); 2023 } 2024 2025 if (i == BGE_TIMEOUT * 2) { 2026 aprint_error_dev(sc->bge_dev, 2027 "buffer manager failed to start\n"); 2028 return ENXIO; 2029 } 2030 2031 /* Step 40: Enable flow-through queues */ 2032 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 2033 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 2034 2035 /* Wait until queue initialization is complete */ 2036 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2037 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 2038 break; 2039 DELAY(10); 2040 } 2041 2042 if (i == BGE_TIMEOUT * 2) { 2043 aprint_error_dev(sc->bge_dev, 2044 "flow-through queue init failed\n"); 2045 return ENXIO; 2046 } 2047 2048 /* Step 41: Initialize the standard RX ring control block */ 
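 *
 * Each RCB holds a 64-bit host ring address, a packed
 * maxlen/flags word and a NIC-side ring address.  Assuming the
 * usual packing macro, BGE_RCB_MAXLEN_FLAGS(len, flags) ==
 * ((len) << 16 | (flags)), so e.g. BGE_RCB_MAXLEN_FLAGS(512, 0)
 * == 0x02000000.
 */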
2049 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb; 2050 BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring)); 2051 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2052 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 2053 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) 2054 rcb->bge_maxlen_flags = 2055 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); 2056 else if (BGE_IS_5705_PLUS(sc)) 2057 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 2058 else 2059 rcb->bge_maxlen_flags = 2060 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 2061 rcb->bge_nicaddr = BGE_STD_RX_RINGS; 2062 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 2063 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 2064 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 2065 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 2066 2067 /* 2068 * Step 42: Initialize the jumbo RX ring control block 2069 * We set the 'ring disabled' bit in the flags 2070 * field until we're actually ready to start 2071 * using this ring (i.e. once we set the MTU 2072 * high enough to require it). 2073 */ 2074 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2075 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb; 2076 BGE_HOSTADDR(rcb->bge_hostaddr, 2077 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring)); 2078 rcb->bge_maxlen_flags = 2079 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 2080 BGE_RCB_FLAG_RING_DISABLED); 2081 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; 2082 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, 2083 rcb->bge_hostaddr.bge_addr_hi); 2084 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, 2085 rcb->bge_hostaddr.bge_addr_lo); 2086 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, 2087 rcb->bge_maxlen_flags); 2088 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); 2089 2090 /* Set up dummy disabled mini ring RCB */ 2091 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb; 2092 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 2093 BGE_RCB_FLAG_RING_DISABLED); 2094 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, 2095 rcb->bge_maxlen_flags); 2096 2097 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 2098 offsetof(struct bge_ring_data, bge_info), 2099 sizeof (struct bge_gib), 2100 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2101 } 2102 2103 /* 2104 * Set the BD ring replenish thresholds. The recommended 2105 * values are 1/8th the number of descriptors allocated to 2106 * each ring. 2107 */ 2108 i = BGE_STD_RX_RING_CNT / 8; 2109 2110 /* 2111 * Use a value of 8 for the following chips to workaround HW errata. 2112 * Some of these chips have been added based on empirical 2113 * evidence (they don't work unless this is done). 2114 */ 2115 if (BGE_IS_5705_PLUS(sc)) 2116 i = 8; 2117 2118 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i); 2119 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8); 2120 2121 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2122 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 2123 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) { 2124 CSR_WRITE_4(sc, BGE_STD_REPL_LWM, 4); 2125 CSR_WRITE_4(sc, BGE_JUMBO_REPL_LWM, 4); 2126 } 2127 2128 /* 2129 * Disable all unused send rings by setting the 'ring disabled' 2130 * bit in the flags field of all the TX send ring control blocks. 2131 * These are located in NIC memory. 
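 *
 * RCB_WRITE_4() below is, roughly, a bus_space write of a single
 * RCB member through the memory window, i.e. approximately:
 *
 *	bus_space_write_4(sc->bge_btag, sc->bge_bhandle,
 *	    rcb_addr + offsetof(struct bge_rcb, member), val);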
2132 */ 2133 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2134 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 2135 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2136 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 2137 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2138 rcb_addr += sizeof(struct bge_rcb); 2139 } 2140 2141 /* Configure TX RCB 0 (we use only the first ring) */ 2142 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 2143 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring)); 2144 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2145 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2146 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 2147 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 2148 if (BGE_IS_5700_FAMILY(sc)) 2149 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2150 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 2151 2152 /* Disable all unused RX return rings */ 2153 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2154 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 2155 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0); 2156 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0); 2157 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2158 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 2159 BGE_RCB_FLAG_RING_DISABLED)); 2160 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0); 2161 bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 2162 (i * (sizeof(uint64_t))), 0); 2163 rcb_addr += sizeof(struct bge_rcb); 2164 } 2165 2166 /* Initialize RX ring indexes */ 2167 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 2168 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 2169 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 2170 2171 /* 2172 * Set up RX return ring 0 2173 * Note that the NIC address for RX return rings is 0x00000000. 2174 * The return rings live entirely within the host, so the 2175 * nicaddr field in the RCB isn't used. 2176 */ 2177 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 2178 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring)); 2179 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 2180 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 2181 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000); 2182 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags, 2183 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 2184 2185 /* Set random backoff seed for TX */ 2186 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 2187 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] + 2188 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] + 2189 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] + 2190 BGE_TX_BACKOFF_SEED_MASK); 2191 2192 /* Set inter-packet gap */ 2193 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 2194 2195 /* 2196 * Specify which ring to use for packets that don't match 2197 * any RX rules. 2198 */ 2199 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 2200 2201 /* 2202 * Configure number of RX lists. One interrupt distribution 2203 * list, sixteen active lists, one bad frames class. 2204 */ 2205 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 2206 2207 /* Inialize RX list placement stats mask. */ 2208 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 2209 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 2210 2211 /* Disable host coalescing until we get it set up */ 2212 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 2213 2214 /* Poll to make sure it's shut down. 
*/ 2215 for (i = 0; i < BGE_TIMEOUT * 2; i++) { 2216 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 2217 break; 2218 DELAY(10); 2219 } 2220 2221 if (i == BGE_TIMEOUT * 2) { 2222 aprint_error_dev(sc->bge_dev, 2223 "host coalescing engine failed to idle\n"); 2224 return ENXIO; 2225 } 2226 2227 /* Set up host coalescing defaults */ 2228 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 2229 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 2230 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 2231 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 2232 if (BGE_IS_5700_FAMILY(sc)) { 2233 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 2234 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 2235 } 2236 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 2237 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 2238 2239 /* Set up address of statistics block */ 2240 if (BGE_IS_5700_FAMILY(sc)) { 2241 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats)); 2242 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 2243 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 2244 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi); 2245 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo); 2246 } 2247 2248 /* Set up address of status block */ 2249 BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block)); 2250 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 2251 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi); 2252 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo); 2253 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0; 2254 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0; 2255 2256 /* Turn on host coalescing state machine */ 2257 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 2258 2259 /* Turn on RX BD completion state machine and enable attentions */ 2260 CSR_WRITE_4(sc, BGE_RBDC_MODE, 2261 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 2262 2263 /* Turn on RX list placement state machine */ 2264 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 2265 2266 /* Turn on RX list selector state machine. */ 2267 if (BGE_IS_5700_FAMILY(sc)) 2268 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 2269 2270 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 2271 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 2272 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 2273 BGE_MACMODE_FRMHDR_DMA_ENB; 2274 2275 if (sc->bge_flags & BGE_PHY_FIBER_TBI) 2276 val |= BGE_PORTMODE_TBI; 2277 else if (sc->bge_flags & BGE_PHY_FIBER_MII) 2278 val |= BGE_PORTMODE_GMII; 2279 else 2280 val |= BGE_PORTMODE_MII; 2281 2282 /* Turn on DMA, clear stats */ 2283 CSR_WRITE_4(sc, BGE_MAC_MODE, val); 2284 2285 /* Set misc. 
local control, enable interrupts on attentions */
2286 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
2287
2288 #ifdef notdef
2289 /* Assert GPIO pins for PHY reset */
2290 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
2291 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
2292 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
2293 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
2294 #endif
2295
2296 #if defined(not_quite_yet)
2297 /* The Linux driver enables GPIO pin #1 on 5700s */
2298 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
2299 sc->bge_local_ctrl_reg |=
2300 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
2301 }
2302 #endif
2303 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2304
2305 /* Turn on DMA completion state machine */
2306 if (BGE_IS_5700_FAMILY(sc))
2307 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2308
2309 /* Turn on write DMA state machine */
2310 {
2311 uint32_t bge_wdma_mode =
2312 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
2313
2314 /* Enable host coalescing bug fix; see Linux tg3.c */
2315 if (BGE_IS_5755_PLUS(sc))
2316 bge_wdma_mode |= BGE_WDMAMODE_STATUS_TAG_FIX;
2317
2318 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode);
2319 }
2320
2321 /* Turn on read DMA state machine */
2322 {
2323 uint32_t dma_read_modebits;
2324
2325 dma_read_modebits =
2326 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2327
2328 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2329 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2330 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2331 dma_read_modebits |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2332 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2333 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2334
2335 if (sc->bge_flags & BGE_PCIE)
2336 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
2337 if (sc->bge_flags & BGE_TSO)
2338 dma_read_modebits |= BGE_RDMAMODE_TSO4_ENABLE;
2339 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
2340 delay(40);
2341 }
2342
2343 /* Turn on RX data completion state machine */
2344 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2345
2346 /* Turn on RX BD initiator state machine */
2347 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2348
2349 /* Turn on RX data and RX BD initiator state machine */
2350 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2351
2352 /* Turn on Mbuf cluster free state machine */
2353 if (BGE_IS_5700_FAMILY(sc))
2354 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2355
2356 /* Turn on send BD completion state machine */
2357 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2358
2359 /* Turn on send data completion state machine */
2360 val = BGE_SDCMODE_ENABLE;
2361 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761)
2362 val |= BGE_SDCMODE_CDELAY;
2363 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2364
2365 /* Turn on send data initiator state machine */
2366 if (sc->bge_flags & BGE_TSO) {
2367 /* XXX: magic value from Linux driver */
2368 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
2369 } else
2370 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2371
2372 /* Turn on send BD initiator state machine */
2373 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2374
2375 /* Turn on send BD selector state machine */
2376 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2377
2378 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2379 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2380 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2381
2382 /* Ack/clear link change events */
2383 CSR_WRITE_4(sc, BGE_MAC_STS,
BGE_MACSTAT_SYNC_CHANGED | 2384 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2385 BGE_MACSTAT_LINK_CHANGED); 2386 CSR_WRITE_4(sc, BGE_MI_STS, 0); 2387 2388 /* Enable PHY auto polling (for MII/GMII only) */ 2389 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 2390 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 2391 } else { 2392 BGE_STS_SETBIT(sc, BGE_STS_AUTOPOLL); 2393 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 2394 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) 2395 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 2396 BGE_EVTENB_MI_INTERRUPT); 2397 } 2398 2399 /* 2400 * Clear any pending link state attention. 2401 * Otherwise some link state change events may be lost until attention 2402 * is cleared by bge_intr() -> bge_link_upd() sequence. 2403 * It's not necessary on newer BCM chips - perhaps enabling link 2404 * state change attentions implies clearing pending attention. 2405 */ 2406 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 2407 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 2408 BGE_MACSTAT_LINK_CHANGED); 2409 2410 /* Enable link state change attentions. */ 2411 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 2412 2413 return 0; 2414 } 2415 2416 static const struct bge_revision * 2417 bge_lookup_rev(uint32_t chipid) 2418 { 2419 const struct bge_revision *br; 2420 2421 for (br = bge_revisions; br->br_name != NULL; br++) { 2422 if (br->br_chipid == chipid) 2423 return br; 2424 } 2425 2426 for (br = bge_majorrevs; br->br_name != NULL; br++) { 2427 if (br->br_chipid == BGE_ASICREV(chipid)) 2428 return br; 2429 } 2430 2431 return NULL; 2432 } 2433 2434 static const struct bge_product * 2435 bge_lookup(const struct pci_attach_args *pa) 2436 { 2437 const struct bge_product *bp; 2438 2439 for (bp = bge_products; bp->bp_name != NULL; bp++) { 2440 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor && 2441 PCI_PRODUCT(pa->pa_id) == bp->bp_product) 2442 return bp; 2443 } 2444 2445 return NULL; 2446 } 2447 2448 static int 2449 bge_setpowerstate(struct bge_softc *sc, int powerlevel) 2450 { 2451 #ifdef NOTYET 2452 uint32_t pm_ctl = 0; 2453 2454 /* XXX FIXME: make sure indirect accesses enabled? */ 2455 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4); 2456 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS; 2457 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4); 2458 2459 /* clear the PME_assert bit and power state bits, enable PME */ 2460 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2); 2461 pm_ctl &= ~PCIM_PSTAT_DMASK; 2462 pm_ctl |= (1 << 8); 2463 2464 if (powerlevel == 0) { 2465 pm_ctl |= PCIM_PSTAT_D0; 2466 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2467 pm_ctl, 2); 2468 DELAY(10000); 2469 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg); 2470 DELAY(10000); 2471 2472 #ifdef NOTYET 2473 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */ 2474 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02); 2475 #endif 2476 DELAY(40); DELAY(40); DELAY(40); 2477 DELAY(10000); /* above not quite adequate on 5700 */ 2478 return 0; 2479 } 2480 2481 2482 /* 2483 * Entering ACPI power states D1-D3 is achieved by wiggling 2484 * GMII gpio pins. Example code assumes all hardware vendors 2485 * followed Broadcom's sample pcb layout. Until we verify that 2486 * for all supported OEM cards, states D1-D3 are unsupported. 2487 */ 2488 aprint_error_dev(sc->bge_dev, 2489 "power state %d unimplemented; check GPIO pins\n", 2490 powerlevel); 2491 #endif 2492 return EOPNOTSUPP; 2493 } 2494 2495 2496 /* 2497 * Probe for a Broadcom chip. 
Check the PCI vendor and device IDs 2498 * against our list and return its name if we find a match. Note 2499 * that since the Broadcom controller contains VPD support, we 2500 * can get the device name string from the controller itself instead 2501 * of the compiled-in string. This is a little slow, but it guarantees 2502 * we'll always announce the right product name. 2503 */ 2504 static int 2505 bge_probe(device_t parent, cfdata_t match, void *aux) 2506 { 2507 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 2508 2509 if (bge_lookup(pa) != NULL) 2510 return 1; 2511 2512 return 0; 2513 } 2514 2515 static void 2516 bge_attach(device_t parent, device_t self, void *aux) 2517 { 2518 struct bge_softc *sc = device_private(self); 2519 struct pci_attach_args *pa = aux; 2520 prop_dictionary_t dict; 2521 const struct bge_product *bp; 2522 const struct bge_revision *br; 2523 pci_chipset_tag_t pc; 2524 pci_intr_handle_t ih; 2525 const char *intrstr = NULL; 2526 bus_dma_segment_t seg; 2527 int rseg; 2528 uint32_t hwcfg = 0; 2529 uint32_t command; 2530 struct ifnet *ifp; 2531 uint32_t misccfg; 2532 void * kva; 2533 u_char eaddr[ETHER_ADDR_LEN]; 2534 pcireg_t memtype, subid; 2535 bus_addr_t memaddr; 2536 bus_size_t memsize; 2537 uint32_t pm_ctl; 2538 bool no_seeprom; 2539 2540 bp = bge_lookup(pa); 2541 KASSERT(bp != NULL); 2542 2543 sc->sc_pc = pa->pa_pc; 2544 sc->sc_pcitag = pa->pa_tag; 2545 sc->bge_dev = self; 2546 2547 pc = sc->sc_pc; 2548 subid = pci_conf_read(pc, sc->sc_pcitag, PCI_SUBSYS_ID_REG); 2549 2550 aprint_naive(": Ethernet controller\n"); 2551 aprint_normal(": %s\n", bp->bp_name); 2552 2553 /* 2554 * Map control/status registers. 2555 */ 2556 DPRINTFN(5, ("Map control/status regs\n")); 2557 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2558 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; 2559 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command); 2560 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG); 2561 2562 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 2563 aprint_error_dev(sc->bge_dev, 2564 "failed to enable memory mapping!\n"); 2565 return; 2566 } 2567 2568 DPRINTFN(5, ("pci_mem_find\n")); 2569 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0); 2570 switch (memtype) { 2571 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 2572 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 2573 if (pci_mapreg_map(pa, BGE_PCI_BAR0, 2574 memtype, 0, &sc->bge_btag, &sc->bge_bhandle, 2575 &memaddr, &memsize) == 0) 2576 break; 2577 default: 2578 aprint_error_dev(sc->bge_dev, "can't find mem space\n"); 2579 return; 2580 } 2581 2582 DPRINTFN(5, ("pci_intr_map\n")); 2583 if (pci_intr_map(pa, &ih)) { 2584 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n"); 2585 return; 2586 } 2587 2588 DPRINTFN(5, ("pci_intr_string\n")); 2589 intrstr = pci_intr_string(pc, ih); 2590 2591 DPRINTFN(5, ("pci_intr_establish\n")); 2592 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc); 2593 2594 if (sc->bge_intrhand == NULL) { 2595 aprint_error_dev(sc->bge_dev, 2596 "couldn't establish interrupt%s%s\n", 2597 intrstr ? " at " : "", intrstr ? intrstr : ""); 2598 return; 2599 } 2600 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr); 2601 2602 /* 2603 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?) 2604 * can clobber the chip's PCI config-space power control registers, 2605 * leaving the card in D3 powersave state. 
2606 * We do not have memory-mapped registers in this state,
2607 * so force device into D0 state before starting initialization.
2608 */
2609 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
2610 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2611 pm_ctl |= (1 << 8) | PCI_PWR_D0; /* D0 state */
2612 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2613 DELAY(1000); /* 27 usec is allegedly sufficient */
2614
2615 /*
2616 * Save ASIC rev.
2617 */
2618 sc->bge_chipid =
2619 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL)
2620 >> BGE_PCIMISCCTL_ASICREV_SHIFT;
2621
2622 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) {
2623 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5717 ||
2624 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5718 ||
2625 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5724)
2626 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2627 BGE_PCI_GEN2_PRODID_ASICREV);
2628 else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57761 ||
2629 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57762 ||
2630 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57765 ||
2631 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57781 ||
2632 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57785 ||
2633 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57791 ||
2634 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57795)
2635 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2636 BGE_PCI_GEN15_PRODID_ASICREV);
2637 else
2638 sc->bge_chipid = pci_conf_read(pc, pa->pa_tag,
2639 BGE_PCI_PRODID_ASICREV);
2640 }
2641
2642 if ((pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
2643 &sc->bge_pciecap, NULL) != 0)
2644 || (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785)) {
2645 /* PCIe */
2646 sc->bge_flags |= BGE_PCIE;
2647 bge_set_max_readrq(sc);
2648 } else if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
2649 BGE_PCISTATE_PCI_BUSMODE) == 0) {
2650 /* PCI-X */
2651 sc->bge_flags |= BGE_PCIX;
2652 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
2653 &sc->bge_pcixcap, NULL) == 0)
2654 aprint_error_dev(sc->bge_dev,
2655 "unable to find PCIX capability\n");
2656 }
2657
2658 /* chipid */
2659 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2660 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 ||
2661 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
2662 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
2663 sc->bge_flags |= BGE_5700_FAMILY;
2664
2665 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 ||
2666 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 ||
2667 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
2668 sc->bge_flags |= BGE_5714_FAMILY;
2669
2670 /* Intentionally exclude BGE_ASICREV_BCM5906 */
2671 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 ||
2672 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2673 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 ||
2674 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 ||
2675 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5785 ||
2676 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
2677 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 ||
2678 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766 ||
2679 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57780)
2680 sc->bge_flags |= BGE_5755_PLUS;
2681
2682 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
2683 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2684 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 ||
2685 BGE_IS_5755_PLUS(sc) ||
2686 BGE_IS_5714_FAMILY(sc))
2687 sc->bge_flags |= BGE_5750_PLUS;
2688
2689 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 ||
2690 BGE_IS_5750_OR_BEYOND(sc))
2691 sc->bge_flags |= BGE_5705_PLUS;
2692
2693 /*
2694 * When using the BCM5701 in PCI-X mode, data corruption has
2695 * been observed in the first few bytes of some received packets.
2696 * Aligning the packet buffer in memory eliminates the corruption.
2697 * Unfortunately, this misaligns the packet payloads. On platforms
2698 * which do not support unaligned accesses, we will realign the
2699 * payloads by copying the received packets.
2700 */
2701 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
2702 sc->bge_flags & BGE_PCIX)
2703 sc->bge_flags |= BGE_RX_ALIGNBUG;
2704
2705 if (BGE_IS_5700_FAMILY(sc))
2706 sc->bge_flags |= BGE_JUMBO_CAPABLE;
2707
2708 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
2709 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
2710 PCI_VENDOR(subid) == PCI_VENDOR_DELL)
2711 sc->bge_flags |= BGE_NO_3LED;
2712
2713 misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
2714 misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
2715
2716 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
2717 (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
2718 misccfg == BGE_MISCCFG_BOARD_ID_5788M))
2719 sc->bge_flags |= BGE_IS_5788;
2720
2721 /*
2722 * Some controllers seem to require special firmware to use
2723 * TSO. But the firmware is not available to FreeBSD, and Linux
2724 * claims that the TSO performed by the firmware is slower than
2725 * hardware-based TSO. Moreover, the firmware-based TSO has one
2726 * known bug: it can't handle TSO if the ethernet header plus
2727 * IP/TCP header is greater than 80 bytes. A workaround for the
2728 * TSO bug exists, but it seems more expensive than not using
2729 * TSO at all. Some hardware also has the TSO bug, so limit
2730 * TSO to controllers that are not affected by TSO issues
2731 * (e.g. 5755 or higher).
2732 */
2733 if (BGE_IS_5755_PLUS(sc)) {
2734 /*
2735 * BCM5754 and BCM5787 share the same ASIC ID, so an
2736 * explicit device ID check is required.
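 * BGE_ASICREV() keeps only the family bits of bge_chipid, so
 * both devices collapse to the same value; comparing
 * PCI_PRODUCT(pa->pa_id) is what actually tells them apart.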
2737 */ 2738 if ((PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754) && 2739 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5754M)) 2740 sc->bge_flags |= BGE_TSO; 2741 } 2742 2743 if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 && 2744 (misccfg == 0x4000 || misccfg == 0x8000)) || 2745 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2746 PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2747 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 || 2748 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 || 2749 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) || 2750 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM && 2751 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F || 2752 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F || 2753 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) || 2754 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM57790 || 2755 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2756 sc->bge_flags |= BGE_10_100_ONLY; 2757 2758 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 2759 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 && 2760 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 2761 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) || 2762 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 2763 sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED; 2764 2765 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 2766 sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 2767 sc->bge_flags |= BGE_PHY_CRC_BUG; 2768 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX || 2769 BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX) 2770 sc->bge_flags |= BGE_PHY_ADC_BUG; 2771 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 2772 sc->bge_flags |= BGE_PHY_5704_A0_BUG; 2773 2774 if (BGE_IS_5705_PLUS(sc) && 2775 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906 && 2776 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 && 2777 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 && 2778 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 && 2779 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57766 && 2780 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57780) { 2781 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || 2782 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5761 || 2783 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5784 || 2784 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) { 2785 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 && 2786 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756) 2787 sc->bge_flags |= BGE_PHY_JITTER_BUG; 2788 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M) 2789 sc->bge_flags |= BGE_PHY_ADJUST_TRIM; 2790 } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906) 2791 sc->bge_flags |= BGE_PHY_BER_BUG; 2792 } 2793 2794 /* 2795 * SEEPROM check. 2796 * First check if firmware knows we do not have SEEPROM. 2797 */ 2798 if (prop_dictionary_get_bool(device_properties(self), 2799 "without-seeprom", &no_seeprom) && no_seeprom) 2800 sc->bge_flags |= BGE_NO_EEPROM; 2801 2802 /* Now check the 'ROM failed' bit on the RX CPU */ 2803 else if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) 2804 sc->bge_flags |= BGE_NO_EEPROM; 2805 2806 /* Try to reset the chip. 
*/ 2807 DPRINTFN(5, ("bge_reset\n")); 2808 bge_reset(sc); 2809 2810 sc->bge_asf_mode = 0; 2811 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2812 == BGE_MAGIC_NUMBER)) { 2813 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 2814 & BGE_HWCFG_ASF) { 2815 sc->bge_asf_mode |= ASF_ENABLE; 2816 sc->bge_asf_mode |= ASF_STACKUP; 2817 if (BGE_IS_5750_OR_BEYOND(sc)) { 2818 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2819 } 2820 } 2821 } 2822 2823 /* Try to reset the chip again the nice way. */ 2824 bge_stop_fw(sc); 2825 bge_sig_pre_reset(sc, BGE_RESET_STOP); 2826 if (bge_reset(sc)) 2827 aprint_error_dev(sc->bge_dev, "chip reset failed\n"); 2828 2829 bge_sig_legacy(sc, BGE_RESET_STOP); 2830 bge_sig_post_reset(sc, BGE_RESET_STOP); 2831 2832 if (bge_chipinit(sc)) { 2833 aprint_error_dev(sc->bge_dev, "chip initialization failed\n"); 2834 bge_release_resources(sc); 2835 return; 2836 } 2837 2838 /* 2839 * Get station address from the EEPROM 2840 */ 2841 if (bge_get_eaddr(sc, eaddr)) { 2842 aprint_error_dev(sc->bge_dev, 2843 "failed to read station address\n"); 2844 bge_release_resources(sc); 2845 return; 2846 } 2847 2848 br = bge_lookup_rev(sc->bge_chipid); 2849 2850 if (br == NULL) { 2851 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%x)", 2852 sc->bge_chipid); 2853 } else { 2854 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%x)", 2855 br->br_name, sc->bge_chipid); 2856 } 2857 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr)); 2858 2859 /* Allocate the general information block and ring buffers. */ 2860 if (pci_dma64_available(pa)) 2861 sc->bge_dmatag = pa->pa_dmat64; 2862 else 2863 sc->bge_dmatag = pa->pa_dmat; 2864 DPRINTFN(5, ("bus_dmamem_alloc\n")); 2865 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data), 2866 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 2867 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n"); 2868 return; 2869 } 2870 DPRINTFN(5, ("bus_dmamem_map\n")); 2871 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, 2872 sizeof(struct bge_ring_data), &kva, 2873 BUS_DMA_NOWAIT)) { 2874 aprint_error_dev(sc->bge_dev, 2875 "can't map DMA buffers (%zu bytes)\n", 2876 sizeof(struct bge_ring_data)); 2877 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2878 return; 2879 } 2880 DPRINTFN(5, ("bus_dmamem_create\n")); 2881 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1, 2882 sizeof(struct bge_ring_data), 0, 2883 BUS_DMA_NOWAIT, &sc->bge_ring_map)) { 2884 aprint_error_dev(sc->bge_dev, "can't create DMA map\n"); 2885 bus_dmamem_unmap(sc->bge_dmatag, kva, 2886 sizeof(struct bge_ring_data)); 2887 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2888 return; 2889 } 2890 DPRINTFN(5, ("bus_dmamem_load\n")); 2891 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva, 2892 sizeof(struct bge_ring_data), NULL, 2893 BUS_DMA_NOWAIT)) { 2894 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map); 2895 bus_dmamem_unmap(sc->bge_dmatag, kva, 2896 sizeof(struct bge_ring_data)); 2897 bus_dmamem_free(sc->bge_dmatag, &seg, rseg); 2898 return; 2899 } 2900 2901 DPRINTFN(5, ("bzero\n")); 2902 sc->bge_rdata = (struct bge_ring_data *)kva; 2903 2904 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data)); 2905 2906 /* Try to allocate memory for jumbo buffers. */ 2907 if (BGE_IS_JUMBO_CAPABLE(sc)) { 2908 if (bge_alloc_jumbo_mem(sc)) { 2909 aprint_error_dev(sc->bge_dev, 2910 "jumbo buffer allocation failed\n"); 2911 } else 2912 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 2913 } 2914 2915 /* Set default tuneable values. 
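 * The host-coalescing tunables pair a time threshold (ticks) with
 * a descriptor-count threshold (bds); the chip raises an interrupt
 * when either threshold is reached.  For example, with
 * bge_rx_coal_ticks = 150 and bge_rx_max_coal_bds = 64, a
 * saturated receive stream is signalled roughly once per 64
 * packets, while sparse traffic is signalled on the tick timeout
 * instead.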
*/ 2916 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2917 sc->bge_rx_coal_ticks = 150; 2918 sc->bge_rx_max_coal_bds = 64; 2919 #ifdef ORIG_WPAUL_VALUES 2920 sc->bge_tx_coal_ticks = 150; 2921 sc->bge_tx_max_coal_bds = 128; 2922 #else 2923 sc->bge_tx_coal_ticks = 300; 2924 sc->bge_tx_max_coal_bds = 400; 2925 #endif 2926 if (BGE_IS_5705_PLUS(sc)) { 2927 sc->bge_tx_coal_ticks = (12 * 5); 2928 sc->bge_tx_max_coal_bds = (12 * 5); 2929 aprint_verbose_dev(sc->bge_dev, 2930 "setting short Tx thresholds\n"); 2931 } 2932 2933 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5717 || 2934 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57765 || 2935 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM57766) 2936 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2937 else if (BGE_IS_5705_PLUS(sc)) 2938 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2939 else 2940 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2941 2942 /* Set up ifnet structure */ 2943 ifp = &sc->ethercom.ec_if; 2944 ifp->if_softc = sc; 2945 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2946 ifp->if_ioctl = bge_ioctl; 2947 ifp->if_stop = bge_stop; 2948 ifp->if_start = bge_start; 2949 ifp->if_init = bge_init; 2950 ifp->if_watchdog = bge_watchdog; 2951 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN)); 2952 IFQ_SET_READY(&ifp->if_snd); 2953 DPRINTFN(5, ("strcpy if_xname\n")); 2954 strcpy(ifp->if_xname, device_xname(sc->bge_dev)); 2955 2956 if (sc->bge_chipid != BGE_CHIPID_BCM5700_B0) 2957 sc->ethercom.ec_if.if_capabilities |= 2958 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx; 2959 #if 1 /* XXX TCP/UDP checksum offload breaks with pf(4) */ 2960 sc->ethercom.ec_if.if_capabilities |= 2961 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2962 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 2963 #endif 2964 sc->ethercom.ec_capabilities |= 2965 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; 2966 2967 if (sc->bge_flags & BGE_TSO) 2968 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4; 2969 2970 /* 2971 * Do MII setup. 2972 */ 2973 DPRINTFN(5, ("mii setup\n")); 2974 sc->bge_mii.mii_ifp = ifp; 2975 sc->bge_mii.mii_readreg = bge_miibus_readreg; 2976 sc->bge_mii.mii_writereg = bge_miibus_writereg; 2977 sc->bge_mii.mii_statchg = bge_miibus_statchg; 2978 2979 /* 2980 * Figure out what sort of media we have by checking the 2981 * hardware config word in the first 32k of NIC internal memory, 2982 * or fall back to the config word in the EEPROM. Note: on some BCM5700 2983 * cards, this value appears to be unset. If that's the 2984 * case, we have to rely on identifying the NIC by its PCI 2985 * subsystem ID, as we do below for the SysKonnect SK-9D41. 2986 */ 2987 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) { 2988 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2989 } else if (!(sc->bge_flags & BGE_NO_EEPROM)) { 2990 bge_read_eeprom(sc, (void *)&hwcfg, 2991 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); 2992 hwcfg = be32toh(hwcfg); 2993 } 2994 /* The SysKonnect SK-9D41 is a 1000baseSX card. 
*/
2995 if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41 ||
2996 (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
2997 if (BGE_IS_5714_FAMILY(sc))
2998 sc->bge_flags |= BGE_PHY_FIBER_MII;
2999 else
3000 sc->bge_flags |= BGE_PHY_FIBER_TBI;
3001 }
3002
3003 /* Set phyflags and chipid before mii_attach() */
3004 dict = device_properties(self);
3005 prop_dictionary_set_uint32(dict, "phyflags", sc->bge_flags);
3006 prop_dictionary_set_uint32(dict, "chipid", sc->bge_chipid);
3007
3008 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3009 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3010 bge_ifmedia_sts);
3011 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3012 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX|IFM_FDX,
3013 0, NULL);
3014 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3015 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3016 /* Pretend the user requested this setting */
3017 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3018 } else {
3019 /*
3020 * Do transceiver setup and tell the firmware the
3021 * driver is down so we can try to get access to the
3022 * PHY during the probe if ASF is running. Retry a
3023 * couple of times if we get a conflict with the ASF
3024 * firmware accessing the PHY.
3025 */
3026 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3027 bge_asf_driver_up(sc);
3028
3029 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
3030 bge_ifmedia_sts);
3031 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff,
3032 MII_PHY_ANY, MII_OFFSET_ANY,
3033 MIIF_FORCEANEG|MIIF_DOPAUSE);
3034
3035 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
3036 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
3037 ifmedia_add(&sc->bge_mii.mii_media,
3038 IFM_ETHER|IFM_MANUAL, 0, NULL);
3039 ifmedia_set(&sc->bge_mii.mii_media,
3040 IFM_ETHER|IFM_MANUAL);
3041 } else
3042 ifmedia_set(&sc->bge_mii.mii_media,
3043 IFM_ETHER|IFM_AUTO);
3044
3045 /*
3046 * Now tell the firmware we are going up after probing the PHY.
3047 */
3048 if (sc->bge_asf_mode & ASF_STACKUP)
3049 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3050 }
3051
3052 /*
3053 * Call MI attach routine.
3054 */
3055 DPRINTFN(5, ("if_attach\n"));
3056 if_attach(ifp);
3057 DPRINTFN(5, ("ether_ifattach\n"));
3058 ether_ifattach(ifp, eaddr);
3059 ether_set_ifflags_cb(&sc->ethercom, bge_ifflags_cb);
3060 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
3061 RND_TYPE_NET, 0);
3062 #ifdef BGE_EVENT_COUNTERS
3063 /*
3064 * Attach event counters.
3065 */ 3066 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR, 3067 NULL, device_xname(sc->bge_dev), "intr"); 3068 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC, 3069 NULL, device_xname(sc->bge_dev), "tx_xoff"); 3070 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC, 3071 NULL, device_xname(sc->bge_dev), "tx_xon"); 3072 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC, 3073 NULL, device_xname(sc->bge_dev), "rx_xoff"); 3074 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC, 3075 NULL, device_xname(sc->bge_dev), "rx_xon"); 3076 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC, 3077 NULL, device_xname(sc->bge_dev), "rx_macctl"); 3078 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC, 3079 NULL, device_xname(sc->bge_dev), "xoffentered"); 3080 #endif /* BGE_EVENT_COUNTERS */ 3081 DPRINTFN(5, ("callout_init\n")); 3082 callout_init(&sc->bge_timeout, 0); 3083 3084 if (pmf_device_register(self, NULL, NULL)) 3085 pmf_class_network_register(self, ifp); 3086 else 3087 aprint_error_dev(self, "couldn't establish power handler\n"); 3088 3089 sysctl_bge_init(sc); 3090 3091 #ifdef BGE_DEBUG 3092 bge_debug_info(sc); 3093 #endif 3094 } 3095 3096 static void 3097 bge_release_resources(struct bge_softc *sc) 3098 { 3099 if (sc->bge_vpd_prodname != NULL) 3100 free(sc->bge_vpd_prodname, M_DEVBUF); 3101 3102 if (sc->bge_vpd_readonly != NULL) 3103 free(sc->bge_vpd_readonly, M_DEVBUF); 3104 } 3105 3106 static int 3107 bge_reset(struct bge_softc *sc) 3108 { 3109 uint32_t cachesize, command, pcistate, marbmode; 3110 #if 0 3111 uint32_t new_pcistate; 3112 #endif 3113 pcireg_t devctl, reg; 3114 int i, val; 3115 void (*write_op)(struct bge_softc *, int, int); 3116 3117 if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc) 3118 && (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) { 3119 if (sc->bge_flags & BGE_PCIE) 3120 write_op = bge_writemem_direct; 3121 else 3122 write_op = bge_writemem_ind; 3123 } else 3124 write_op = bge_writereg_ind; 3125 3126 /* Save some important PCI state. */ 3127 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ); 3128 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3129 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE); 3130 3131 /* Step 5a: Enable memory arbiter. */ 3132 marbmode = 0; 3133 if (BGE_IS_5714_FAMILY(sc)) 3134 marbmode = CSR_READ_4(sc, BGE_MARB_MODE); 3135 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode); 3136 3137 /* Step 5b-5d: */ 3138 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3139 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3140 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW); 3141 3142 /* XXX ???: Disable fastboot on controllers that support it. */ 3143 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || 3144 BGE_IS_5755_PLUS(sc)) 3145 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0); 3146 3147 /* 3148 * Step 6: Write the magic number to SRAM at offset 0xB50. 3149 * When firmware finishes its initialization it will 3150 * write ~BGE_MAGIC_NUMBER to the same location. 
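 * bge_poll_fw() later spins on this handshake, roughly:
 *
 *	val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 *	if (val == ~BGE_MAGIC_NUMBER)
 *		// firmware initialization is complete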
3151 */ 3152 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 3153 3154 /* Step 7: */ 3155 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1); 3156 /* 3157 * XXX: from FreeBSD/Linux; no documentation 3158 */ 3159 if (sc->bge_flags & BGE_PCIE) { 3160 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60) 3161 /* PCI Express 1.0 system */ 3162 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20); 3163 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3164 /* 3165 * Prevent PCI Express link training 3166 * during global reset. 3167 */ 3168 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 3169 val |= (1<<29); 3170 } 3171 } 3172 3173 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) { 3174 i = CSR_READ_4(sc, BGE_VCPU_STATUS); 3175 CSR_WRITE_4(sc, BGE_VCPU_STATUS, 3176 i | BGE_VCPU_STATUS_DRV_RESET); 3177 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 3178 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 3179 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 3180 } 3181 3182 /* 3183 * Set GPHY Power Down Override to leave GPHY 3184 * powered up in D0 uninitialized. 3185 */ 3186 if (BGE_IS_5705_PLUS(sc)) 3187 val |= BGE_MISCCFG_KEEP_GPHY_POWER; 3188 3189 /* XXX 5721, 5751 and 5752 */ 3190 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) 3191 val |= BGE_MISCCFG_GRC_RESET_DISABLE; 3192 3193 /* Issue global reset */ 3194 write_op(sc, BGE_MISC_CFG, val); 3195 3196 /* Step 8: wait for complete */ 3197 if (sc->bge_flags & BGE_PCIE) 3198 delay(100*1000); /* too big */ 3199 else 3200 delay(100); 3201 3202 /* From Linux: dummy read to flush PCI posted writes */ 3203 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD); 3204 3205 /* Step 9-10: Reset some of the PCI state that got zapped by reset */ 3206 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL, 3207 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3208 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW 3209 | BGE_PCIMISCCTL_CLOCKCTL_RW); 3210 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command); 3211 write_op(sc, BGE_MISC_CFG, (65 << 1)); 3212 3213 /* Step 11: disable PCI-X Relaxed Ordering. */ 3214 if (sc->bge_flags & BGE_PCIX) { 3215 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3216 + PCI_PCIX_CMD); 3217 pci_conf_write(sc->sc_pc, sc->sc_pcitag, sc->bge_pcixcap 3218 + PCI_PCIX_CMD, reg & ~PCI_PCIX_CMD_RELAXED_ORDER); 3219 } 3220 3221 if (sc->bge_flags & BGE_PCIE) { 3222 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3223 DELAY(500000); 3224 /* XXX: Magic Numbers */ 3225 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3226 BGE_PCI_UNKNOWN0); 3227 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3228 BGE_PCI_UNKNOWN0, 3229 reg | (1 << 15)); 3230 } 3231 devctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 3232 sc->bge_pciecap + PCI_PCIE_DCSR); 3233 /* Clear enable no snoop and disable relaxed ordering. */ 3234 devctl &= ~(0x0010 | PCI_PCIE_DCSR_ENA_NO_SNOOP); 3235 /* Set PCIE max payload size to 128. */ 3236 devctl &= ~(0x00e0); 3237 /* Clear device status register. Write 1b to clear */ 3238 devctl |= PCI_PCIE_DCSR_URD | PCI_PCIE_DCSR_FED 3239 | PCI_PCIE_DCSR_NFED | PCI_PCIE_DCSR_CED; 3240 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 3241 sc->bge_pciecap + PCI_PCIE_DCSR, devctl); 3242 } 3243 3244 /* Step 12: Enable memory arbiter. 
*/
3245 marbmode = 0;
3246 if (BGE_IS_5714_FAMILY(sc))
3247 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3248 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3249
3250 /* Step 17: Poll until the firmware initialization is complete */
3251 bge_poll_fw(sc);
3252
3253 /* XXX 5721, 5751 and 5752 */
3254 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750) {
3255 /* Step 19: */
3256 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, 1 << 29 | 1 << 25);
3257 /* Step 20: */
3258 BGE_SETBIT(sc, BGE_TLP_CONTROL_REG, BGE_TLP_DATA_FIFO_PROTECT);
3259 }
3260
3261 /*
3262 * Step 18: write MAC mode.
3263 * XXX Write 0x0c for 5703S and 5704S
3264 */
3265 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3266
3267
3268 /* Step 21: 5704 B0 errata */
3269 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_BX) {
3270 pcireg_t msidata;
3271
3272 msidata = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3273 BGE_PCI_MSI_DATA);
3274 msidata |= ((1 << 13 | 1 << 12 | 1 << 10) << 16);
3275 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MSI_DATA,
3276 msidata);
3277 }
3278
3279 /* Step 23: restore cache line size */
3280 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
3281
3282 #if 0
3283 /*
3284 * XXX Wait for the value of the PCISTATE register to
3285 * return to its original pre-reset state. This is a
3286 * fairly good indicator of reset completion. If we don't
3287 * wait for the reset to fully complete, trying to read
3288 * from the device's non-PCI registers may yield garbage
3289 * results.
3290 */
3291 for (i = 0; i < BGE_TIMEOUT; i++) {
3292 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3293 BGE_PCI_PCISTATE);
3294 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
3295 (pcistate & ~BGE_PCISTATE_RESERVED))
3296 break;
3297 DELAY(10);
3298 }
3299 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
3300 (pcistate & ~BGE_PCISTATE_RESERVED)) {
3301 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n");
3302 }
3303 #endif
3304
3305 /* Step 28: Fix up byte swapping */
3306 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
3307
3308 /* Tell the ASF firmware we are up */
3309 if (sc->bge_asf_mode & ASF_STACKUP)
3310 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3311
3312 /*
3313 * The 5704 in TBI mode apparently needs some special
3314 * adjustment to ensure the SERDES drive level is set
3315 * to 1.2V.
3316 */
3317 if (sc->bge_flags & BGE_PHY_FIBER_TBI &&
3318 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3319 uint32_t serdescfg;
3320
3321 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
3322 serdescfg = (serdescfg & ~0xFFF) | 0x880;
3323 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
3324 }
3325
3326 if (sc->bge_flags & BGE_PCIE &&
3327 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
3328 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5717 &&
3329 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5785 &&
3330 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57765 &&
3331 BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM57766) {
3332 uint32_t v;
3333
3334 /* Enable PCI Express bug fix */
3335 v = CSR_READ_4(sc, 0x7c00);
3336 CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
3337 }
3338 DELAY(10000);
3339
3340 return 0;
3341 }
3342
3343 /*
3344 * Frame reception handling. This is called if there's a frame
3345 * on the receive return list.
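 *
 * The status block's bge_rx_prod_idx is the chip's producer index
 * into the return ring and bge_rx_saved_considx is the driver's
 * consumer index; both wrap modulo bge_return_ring_cnt via
 * BGE_INC(), which is roughly x = (x + 1) % cnt.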
3346 * 3347 * Note: we have to be able to handle two possibilities here: 3348 * 1) the frame is from the jumbo receive ring 3349 * 2) the frame is from the standard receive ring 3350 */ 3351 3352 static void 3353 bge_rxeof(struct bge_softc *sc) 3354 { 3355 struct ifnet *ifp; 3356 uint16_t rx_prod, rx_cons; 3357 int stdcnt = 0, jumbocnt = 0; 3358 bus_dmamap_t dmamap; 3359 bus_addr_t offset, toff; 3360 bus_size_t tlen; 3361 int tosync; 3362 3363 rx_cons = sc->bge_rx_saved_considx; 3364 rx_prod = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx; 3365 3366 /* Nothing to do */ 3367 if (rx_cons == rx_prod) 3368 return; 3369 3370 ifp = &sc->ethercom.ec_if; 3371 3372 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3373 offsetof(struct bge_ring_data, bge_status_block), 3374 sizeof (struct bge_status_block), 3375 BUS_DMASYNC_POSTREAD); 3376 3377 offset = offsetof(struct bge_ring_data, bge_rx_return_ring); 3378 tosync = rx_prod - rx_cons; 3379 3380 if (tosync != 0) 3381 rnd_add_uint32(&sc->rnd_source, tosync); 3382 3383 toff = offset + (rx_cons * sizeof (struct bge_rx_bd)); 3384 3385 if (tosync < 0) { 3386 tlen = (sc->bge_return_ring_cnt - rx_cons) * 3387 sizeof (struct bge_rx_bd); 3388 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3389 toff, tlen, BUS_DMASYNC_POSTREAD); 3390 tosync = -tosync; 3391 } 3392 3393 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3394 offset, tosync * sizeof (struct bge_rx_bd), 3395 BUS_DMASYNC_POSTREAD); 3396 3397 while (rx_cons != rx_prod) { 3398 struct bge_rx_bd *cur_rx; 3399 uint32_t rxidx; 3400 struct mbuf *m = NULL; 3401 3402 cur_rx = &sc->bge_rdata->bge_rx_return_ring[rx_cons]; 3403 3404 rxidx = cur_rx->bge_idx; 3405 BGE_INC(rx_cons, sc->bge_return_ring_cnt); 3406 3407 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 3408 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3409 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 3410 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 3411 jumbocnt++; 3412 bus_dmamap_sync(sc->bge_dmatag, 3413 sc->bge_cdata.bge_rx_jumbo_map, 3414 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, 3415 BGE_JLEN, BUS_DMASYNC_POSTREAD); 3416 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3417 ifp->if_ierrors++; 3418 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3419 continue; 3420 } 3421 if (bge_newbuf_jumbo(sc, sc->bge_jumbo, 3422 NULL)== ENOBUFS) { 3423 ifp->if_ierrors++; 3424 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 3425 continue; 3426 } 3427 } else { 3428 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3429 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3430 3431 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 3432 stdcnt++; 3433 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx]; 3434 sc->bge_cdata.bge_rx_std_map[rxidx] = 0; 3435 if (dmamap == NULL) { 3436 ifp->if_ierrors++; 3437 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3438 continue; 3439 } 3440 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, 3441 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3442 bus_dmamap_unload(sc->bge_dmatag, dmamap); 3443 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3444 ifp->if_ierrors++; 3445 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3446 continue; 3447 } 3448 if (bge_newbuf_std(sc, sc->bge_std, 3449 NULL, dmamap) == ENOBUFS) { 3450 ifp->if_ierrors++; 3451 bge_newbuf_std(sc, sc->bge_std, m, dmamap); 3452 continue; 3453 } 3454 } 3455 3456 ifp->if_ipackets++; 3457 #ifndef __NO_STRICT_ALIGNMENT 3458 /* 3459 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect, 3460 * the Rx buffer has the layer-2 header unaligned. 3461 * If our CPU requires alignment, re-align by copying. 
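 * Worked example: the 14-byte Ethernet header leaves the IP header
 * only 2-byte aligned when the frame starts on a 4-byte boundary;
 * sliding the payload up by ETHER_ALIGN (2 bytes) makes the IP
 * header 4-byte aligned again, at the cost of a copy.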
3462 */ 3463 if (sc->bge_flags & BGE_RX_ALIGNBUG) { 3464 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data, 3465 cur_rx->bge_len); 3466 m->m_data += ETHER_ALIGN; 3467 } 3468 #endif 3469 3470 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 3471 m->m_pkthdr.rcvif = ifp; 3472 3473 /* 3474 * Handle BPF listeners. Let the BPF user see the packet. 3475 */ 3476 bpf_mtap(ifp, m); 3477 3478 m->m_pkthdr.csum_flags = M_CSUM_IPv4; 3479 3480 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0) 3481 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 3482 /* 3483 * Rx transport checksum-offload may also 3484 * have bugs with packets which, when transmitted, 3485 * were `runts' requiring padding. 3486 */ 3487 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3488 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/ 3489 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) { 3490 m->m_pkthdr.csum_data = 3491 cur_rx->bge_tcp_udp_csum; 3492 m->m_pkthdr.csum_flags |= 3493 (M_CSUM_TCPv4|M_CSUM_UDPv4| 3494 M_CSUM_DATA); 3495 } 3496 3497 /* 3498 * If we received a packet with a vlan tag, pass it 3499 * to vlan_input() instead of ether_input(). 3500 */ 3501 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 3502 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue); 3503 } 3504 3505 (*ifp->if_input)(ifp, m); 3506 } 3507 3508 sc->bge_rx_saved_considx = rx_cons; 3509 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3510 if (stdcnt) 3511 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3512 if (jumbocnt) 3513 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3514 } 3515 3516 static void 3517 bge_txeof(struct bge_softc *sc) 3518 { 3519 struct bge_tx_bd *cur_tx = NULL; 3520 struct ifnet *ifp; 3521 struct txdmamap_pool_entry *dma; 3522 bus_addr_t offset, toff; 3523 bus_size_t tlen; 3524 int tosync; 3525 struct mbuf *m; 3526 3527 ifp = &sc->ethercom.ec_if; 3528 3529 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3530 offsetof(struct bge_ring_data, bge_status_block), 3531 sizeof (struct bge_status_block), 3532 BUS_DMASYNC_POSTREAD); 3533 3534 offset = offsetof(struct bge_ring_data, bge_tx_ring); 3535 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx - 3536 sc->bge_tx_saved_considx; 3537 3538 if (tosync != 0) 3539 rnd_add_uint32(&sc->rnd_source, tosync); 3540 3541 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd)); 3542 3543 if (tosync < 0) { 3544 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) * 3545 sizeof (struct bge_tx_bd); 3546 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3547 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3548 tosync = -tosync; 3549 } 3550 3551 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map, 3552 offset, tosync * sizeof (struct bge_tx_bd), 3553 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3554 3555 /* 3556 * Go through our tx ring and free mbufs for those 3557 * frames that have been sent. 
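* (i.e., every descriptor between bge_tx_saved_considx and the TX
* consumer index that the chip reported in the status block.)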
3558 */ 3559 while (sc->bge_tx_saved_considx != 3560 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) { 3561 uint32_t idx = 0; 3562 3563 idx = sc->bge_tx_saved_considx; 3564 cur_tx = &sc->bge_rdata->bge_tx_ring[idx]; 3565 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 3566 ifp->if_opackets++; 3567 m = sc->bge_cdata.bge_tx_chain[idx]; 3568 if (m != NULL) { 3569 sc->bge_cdata.bge_tx_chain[idx] = NULL; 3570 dma = sc->txdma[idx]; 3571 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0, 3572 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3573 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap); 3574 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link); 3575 sc->txdma[idx] = NULL; 3576 3577 m_freem(m); 3578 } 3579 sc->bge_txcnt--; 3580 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 3581 ifp->if_timer = 0; 3582 } 3583 3584 if (cur_tx != NULL) 3585 ifp->if_flags &= ~IFF_OACTIVE; 3586 } 3587 3588 static int 3589 bge_intr(void *xsc) 3590 { 3591 struct bge_softc *sc; 3592 struct ifnet *ifp; 3593 uint32_t statusword; 3594 3595 sc = xsc; 3596 ifp = &sc->ethercom.ec_if; 3597 3598 /* It is possible for the interrupt to arrive before 3599 * the status block is updated prior to the interrupt. 3600 * Reading the PCI State register will confirm whether the 3601 * interrupt is ours and will flush the status block. 3602 */ 3603 3604 /* read status word from status block */ 3605 statusword = sc->bge_rdata->bge_status_block.bge_status; 3606 3607 if ((statusword & BGE_STATFLAG_UPDATED) || 3608 (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) { 3609 /* Ack interrupt and stop others from occuring. */ 3610 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 3611 3612 BGE_EVCNT_INCR(sc->bge_ev_intr); 3613 3614 /* clear status word */ 3615 sc->bge_rdata->bge_status_block.bge_status = 0; 3616 3617 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 3618 statusword & BGE_STATFLAG_LINKSTATE_CHANGED || 3619 BGE_STS_BIT(sc, BGE_STS_LINK_EVT)) 3620 bge_link_upd(sc); 3621 3622 if (ifp->if_flags & IFF_RUNNING) { 3623 /* Check RX return ring producer/consumer */ 3624 bge_rxeof(sc); 3625 3626 /* Check TX ring producer/consumer */ 3627 bge_txeof(sc); 3628 } 3629 3630 if (sc->bge_pending_rxintr_change) { 3631 uint32_t rx_ticks = sc->bge_rx_coal_ticks; 3632 uint32_t rx_bds = sc->bge_rx_max_coal_bds; 3633 uint32_t junk; 3634 3635 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks); 3636 DELAY(10); 3637 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS); 3638 3639 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds); 3640 DELAY(10); 3641 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS); 3642 3643 sc->bge_pending_rxintr_change = 0; 3644 } 3645 bge_handle_events(sc); 3646 3647 /* Re-enable interrupts. */ 3648 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3649 3650 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd)) 3651 bge_start(ifp); 3652 3653 return 1; 3654 } else 3655 return 0; 3656 } 3657 3658 static void 3659 bge_asf_driver_up(struct bge_softc *sc) 3660 { 3661 if (sc->bge_asf_mode & ASF_STACKUP) { 3662 /* Send ASF heartbeat aprox. 
every 2s */
3663 if (sc->bge_asf_count)
3664 sc->bge_asf_count--;
3665 else {
3666 sc->bge_asf_count = 2;
3667 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3668 BGE_FW_DRV_ALIVE);
3669 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3670 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3671 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3672 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3673 }
3674 }
3675 }
3676 
3677 static void
3678 bge_tick(void *xsc)
3679 {
3680 struct bge_softc *sc = xsc;
3681 struct mii_data *mii = &sc->bge_mii;
3682 int s;
3683 
3684 s = splnet();
3685 
3686 if (BGE_IS_5705_PLUS(sc))
3687 bge_stats_update_regs(sc);
3688 else
3689 bge_stats_update(sc);
3690 
3691 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
3692 /*
3693 * Since auto-polling can't be used in TBI mode, we have to
3694 * poll link status manually. Here we register a pending link
3695 * event and trigger an interrupt.
3696 */
3697 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
3698 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
3699 } else {
3700 /*
3701 * Do not touch the PHY if we have link up. This could break
3702 * IPMI/ASF mode or produce extra input errors.
3703 * (Extra input errors were reported for bcm5701 & bcm5704.)
3704 */
3705 if (!BGE_STS_BIT(sc, BGE_STS_LINK))
3706 mii_tick(mii);
3707 }
3708 
3709 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3710 
3711 splx(s);
3712 }
3713 
3714 static void
3715 bge_stats_update_regs(struct bge_softc *sc)
3716 {
3717 struct ifnet *ifp = &sc->ethercom.ec_if;
3718 
3719 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS +
3720 offsetof(struct bge_mac_stats_regs, etherStatsCollisions));
3721 
3722 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
3723 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
3724 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
3725 }
3726 
3727 static void
3728 bge_stats_update(struct bge_softc *sc)
3729 {
3730 struct ifnet *ifp = &sc->ethercom.ec_if;
3731 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3732 
3733 #define READ_STAT(sc, stats, stat) \
3734 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3735 
3736 ifp->if_collisions +=
3737 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3738 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3739 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3740 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3741 ifp->if_collisions;
3742 
3743 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
3744 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
3745 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
3746 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
3747 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
3748 READ_STAT(sc, stats,
3749 xoffPauseFramesReceived.bge_addr_lo));
3750 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
3751 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
3752 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
3753 READ_STAT(sc, stats,
3754 macControlFramesReceived.bge_addr_lo));
3755 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
3756 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
3757 
3758 #undef READ_STAT
3759 
3760 #ifdef notdef
3761 ifp->if_collisions +=
3762 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3763 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3764 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3765 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3766 ifp->if_collisions;
3767 #endif
3768 }
3769 
3770 /*
3771 * Pad outbound frame to ETHER_MIN_NOPAD for an
unusual reason.
3772 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3773 * but when such padded frames employ the bge IP/TCP checksum offload,
3774 * the hardware checksum assist gives incorrect results (possibly
3775 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3776 * If we pad such runts with zeros, the onboard checksum comes out correct.
3777 */
3778 static inline int
3779 bge_cksum_pad(struct mbuf *pkt)
3780 {
3781 struct mbuf *last = NULL;
3782 int padlen;
3783 
3784 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3785 
3786 /* If there's only the packet header and we can pad there, use it. */
3787 if (pkt->m_pkthdr.len == pkt->m_len &&
3788 M_TRAILINGSPACE(pkt) >= padlen) {
3789 last = pkt;
3790 } else {
3791 /*
3792 * Walk packet chain to find last mbuf. We will either
3793 * pad there, or append a new mbuf and pad it
3794 * (thus perhaps avoiding the bcm5700 dma-min bug).
3795 */
3796 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3797 continue; /* do nothing */
3798 }
3799 
3800 /* `last' now points to last in chain. */
3801 if (M_TRAILINGSPACE(last) < padlen) {
3802 /* Allocate new empty mbuf, pad it. Compact later. */
3803 struct mbuf *n;
3804 MGET(n, M_DONTWAIT, MT_DATA);
3805 if (n == NULL)
3806 return ENOBUFS;
3807 n->m_len = 0;
3808 last->m_next = n;
3809 last = n;
3810 }
3811 }
3812 
3813 KDASSERT(!M_READONLY(last));
3814 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
3815 
3816 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3817 memset(mtod(last, char *) + last->m_len, 0, padlen);
3818 last->m_len += padlen;
3819 pkt->m_pkthdr.len += padlen;
3820 return 0;
3821 }
3822 
3823 /*
3824 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3825 */
3826 static inline int
3827 bge_compact_dma_runt(struct mbuf *pkt)
3828 {
3829 struct mbuf *m, *prev;
3830 int totlen, prevlen;
3831 
3832 prev = NULL;
3833 totlen = 0;
3834 prevlen = -1;
3835 
3836 for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3837 int mlen = m->m_len;
3838 int shortfall = 8 - mlen;
3839 
3840 totlen += mlen;
3841 if (mlen == 0) {
3842 continue;
3843 }
3844 if (mlen >= 8)
3845 continue;
3846 
3847 /* If we get here, mbuf data is too small for DMA engine.
3848 * Try to fix by shuffling data to prev or next in chain.
3849 * If that fails, do a compacting deep-copy of the whole chain.
3850 */
3851 
3852 /* Internal frag. If it fits in prev, copy it there. */
3853 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
3854 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
3855 prev->m_len += mlen;
3856 m->m_len = 0;
3857 /* XXX stitch chain */
3858 prev->m_next = m_free(m);
3859 m = prev;
3860 continue;
3861 }
3862 else if (m->m_next != NULL &&
3863 M_TRAILINGSPACE(m) >= shortfall &&
3864 m->m_next->m_len >= (8 + shortfall)) {
3865 /* m is writable and has enough data in next; pull up. */
3866 
3867 memcpy(m->m_data + m->m_len, m->m_next->m_data,
3868 shortfall);
3869 m->m_len += shortfall;
3870 m->m_next->m_len -= shortfall;
3871 m->m_next->m_data += shortfall;
3872 }
3873 else if (m->m_next == NULL || 1) {
3874 /* Got a runt at the very end of the packet.
3875 * Borrow data from the tail of the preceding mbuf and
3876 * update its length in-place. (The original data is still
3877 * valid, so we can do this even if prev is not writable.)
3878 */
3879 
3880 /* if we'd make prev a runt, just move all of its data.
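* (shortfall is clamped to prev->m_len just below, so in that case the
* copy takes everything prev holds and leaves prev empty.)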
*/
3881 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3882 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3883 
3884 if ((prev->m_len - shortfall) < 8)
3885 shortfall = prev->m_len;
3886 
3887 #ifdef notyet /* just do the safe slow thing for now */
3888 if (!M_READONLY(m)) {
3889 if (M_LEADINGSPACE(m) < shortfall) {
3890 void *m_dat;
3891 m_dat = (m->m_flags & M_PKTHDR) ?
3892 m->m_pktdat : m->m_dat;
3893 memmove(m_dat, mtod(m, void*), m->m_len);
3894 m->m_data = m_dat;
3895 }
3896 } else
3897 #endif /* just do the safe slow thing */
3898 {
3899 struct mbuf *n = NULL;
3900 int newprevlen = prev->m_len - shortfall;
3901 
3902 MGET(n, M_NOWAIT, MT_DATA);
3903 if (n == NULL)
3904 return ENOBUFS;
3905 KASSERT(m->m_len + shortfall < MLEN
3906 /*,
3907 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3908 
3909 /* first copy the data we're stealing from prev */
3910 memcpy(n->m_data, prev->m_data + newprevlen,
3911 shortfall);
3912 
3913 /* update prev->m_len accordingly */
3914 prev->m_len -= shortfall;
3915 
3916 /* copy data from runt m */
3917 memcpy(n->m_data + shortfall, m->m_data,
3918 m->m_len);
3919 
3920 /* n holds what we stole from prev, plus m */
3921 n->m_len = shortfall + m->m_len;
3922 
3923 /* stitch n into chain and free m */
3924 n->m_next = m->m_next;
3925 prev->m_next = n;
3926 /* KASSERT(m->m_next == NULL); */
3927 m->m_next = NULL;
3928 m_free(m);
3929 m = n; /* for continuing loop */
3930 }
3931 }
3932 prevlen = m->m_len;
3933 }
3934 return 0;
3935 }
3936 
3937 /*
3938 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3939 * pointers to descriptors.
3940 */
3941 static int
3942 bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
3943 {
3944 struct bge_tx_bd *f = NULL;
3945 uint32_t frag, cur;
3946 uint16_t csum_flags = 0;
3947 uint16_t txbd_tso_flags = 0;
3948 struct txdmamap_pool_entry *dma;
3949 bus_dmamap_t dmamap;
3950 int i = 0;
3951 struct m_tag *mtag;
3952 int use_tso, maxsegsize, error;
3953 
3954 cur = frag = *txidx;
3955 
3956 if (m_head->m_pkthdr.csum_flags) {
3957 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3958 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3959 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3960 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3961 }
3962 
3963 /*
3964 * If we were asked to do an outboard checksum, and the NIC
3965 * has the bug where it sometimes adds in the Ethernet padding,
3966 * explicitly pad with zeros so the cksum will be correct either way.
3967 * (For now, do this for all chip versions, until newer ones
3968 * are confirmed not to require the workaround.)
3969 */
3970 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3971 #ifdef notyet
3972 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3973 #endif
3974 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3975 goto check_dma_bug;
3976 
3977 if (bge_cksum_pad(m_head) != 0)
3978 return ENOBUFS;
3979 
3980 check_dma_bug:
3981 if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
3982 goto doit;
3983 
3984 /*
3985 * bcm5700 Revision B silicon cannot handle DMA descriptors with
3986 * less than eight bytes. If we encounter a teeny mbuf
3987 * at the end of a chain, we can pad. Otherwise, copy.
3988 */
3989 if (bge_compact_dma_runt(m_head) != 0)
3990 return ENOBUFS;
3991 
3992 doit:
3993 dma = SLIST_FIRST(&sc->txdma_list);
3994 if (dma == NULL)
3995 return ENOBUFS;
3996 dmamap = dma->dmamap;
3997 
3998 /*
3999 * Set up any necessary TSO state before we start packing...
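* (Sketch of what follows: locate the IP/TCP headers, count the
* IP/TCP option words, then fold the MSS and the ASIC-specific
* option encoding into the per-descriptor TSO flags.)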
4000 */ 4001 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 4002 if (!use_tso) { 4003 maxsegsize = 0; 4004 } else { /* TSO setup */ 4005 unsigned mss; 4006 struct ether_header *eh; 4007 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset; 4008 struct mbuf * m0 = m_head; 4009 struct ip *ip; 4010 struct tcphdr *th; 4011 int iphl, hlen; 4012 4013 /* 4014 * XXX It would be nice if the mbuf pkthdr had offset 4015 * fields for the protocol headers. 4016 */ 4017 4018 eh = mtod(m0, struct ether_header *); 4019 switch (htons(eh->ether_type)) { 4020 case ETHERTYPE_IP: 4021 offset = ETHER_HDR_LEN; 4022 break; 4023 4024 case ETHERTYPE_VLAN: 4025 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 4026 break; 4027 4028 default: 4029 /* 4030 * Don't support this protocol or encapsulation. 4031 */ 4032 return ENOBUFS; 4033 } 4034 4035 /* 4036 * TCP/IP headers are in the first mbuf; we can do 4037 * this the easy way. 4038 */ 4039 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 4040 hlen = iphl + offset; 4041 if (__predict_false(m0->m_len < 4042 (hlen + sizeof(struct tcphdr)))) { 4043 4044 aprint_debug_dev(sc->bge_dev, 4045 "TSO: hard case m0->m_len == %d < ip/tcp hlen %zd," 4046 "not handled yet\n", 4047 m0->m_len, hlen+ sizeof(struct tcphdr)); 4048 #ifdef NOTYET 4049 /* 4050 * XXX jonathan@NetBSD.org: untested. 4051 * how to force this branch to be taken? 4052 */ 4053 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain); 4054 4055 m_copydata(m0, offset, sizeof(ip), &ip); 4056 m_copydata(m0, hlen, sizeof(th), &th); 4057 4058 ip.ip_len = 0; 4059 4060 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 4061 sizeof(ip.ip_len), &ip.ip_len); 4062 4063 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 4064 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 4065 4066 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 4067 sizeof(th.th_sum), &th.th_sum); 4068 4069 hlen += th.th_off << 2; 4070 iptcp_opt_words = hlen; 4071 #else 4072 /* 4073 * if_wm "hard" case not yet supported, can we not 4074 * mandate it out of existence? 4075 */ 4076 (void) ip; (void)th; (void) ip_tcp_hlen; 4077 4078 return ENOBUFS; 4079 #endif 4080 } else { 4081 ip = (struct ip *) (mtod(m0, char *) + offset); 4082 th = (struct tcphdr *) (mtod(m0, char *) + hlen); 4083 ip_tcp_hlen = iphl + (th->th_off << 2); 4084 4085 /* Total IP/TCP options, in 32-bit words */ 4086 iptcp_opt_words = (ip_tcp_hlen 4087 - sizeof(struct tcphdr) 4088 - sizeof(struct ip)) >> 2; 4089 } 4090 if (BGE_IS_5750_OR_BEYOND(sc)) { 4091 th->th_sum = 0; 4092 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM); 4093 } else { 4094 /* 4095 * XXX jonathan@NetBSD.org: 5705 untested. 4096 * Requires TSO firmware patch for 5701/5703/5704. 4097 */ 4098 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 4099 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 4100 } 4101 4102 mss = m_head->m_pkthdr.segsz; 4103 txbd_tso_flags |= 4104 BGE_TXBDFLAG_CPU_PRE_DMA | 4105 BGE_TXBDFLAG_CPU_POST_DMA; 4106 4107 /* 4108 * Our NIC TSO-assist assumes TSO has standard, optionless 4109 * IPv4 and TCP headers, which total 40 bytes. By default, 4110 * the NIC copies 40 bytes of IP/TCP header from the 4111 * supplied header into the IP/TCP header portion of 4112 * each post-TSO-segment. If the supplied packet has IP or 4113 * TCP options, we need to tell the NIC to copy those extra 4114 * bytes into each post-TSO header, in addition to the normal 4115 * 40-byte IP/TCP header (and to leave space accordingly). 4116 * Unfortunately, the driver encoding of option length 4117 * varies across different ASIC families. 
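* For example, a segment carrying a 12-byte block of TCP options has
* iptcp_opt_words == 3; 5705-and-later parts take that value shifted
* into the segment-size field (<< 11 below), while older parts take
* it in the descriptor flags (<< 12).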
4118 */
4119 tcp_seg_flags = 0;
4120 if (iptcp_opt_words) {
4121 if (BGE_IS_5705_PLUS(sc)) {
4122 tcp_seg_flags =
4123 iptcp_opt_words << 11;
4124 } else {
4125 txbd_tso_flags |=
4126 iptcp_opt_words << 12;
4127 }
4128 }
4129 maxsegsize = mss | tcp_seg_flags;
4130 ip->ip_len = htons(mss + ip_tcp_hlen);
4131 
4132 } /* TSO setup */
4133 
4134 /*
4135 * Start packing the mbufs in this chain into
4136 * the fragment pointers. Stop when we run out
4137 * of fragments or hit the end of the mbuf chain.
4138 */
4139 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
4140 BUS_DMA_NOWAIT);
4141 if (error)
4142 return ENOBUFS;
4143 /*
4144 * Sanity check: avoid coming within 16 descriptors
4145 * of the end of the ring.
4146 */
4147 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
4148 BGE_TSO_PRINTF(("%s: "
4149 "dmamap_load_mbuf too close to ring wrap\n",
4150 device_xname(sc->bge_dev)));
4151 goto fail_unload;
4152 }
4153 
4154 mtag = sc->ethercom.ec_nvlans ?
4155 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
4156 
4157 
4158 /* Iterate over dma-map fragments. */
4159 for (i = 0; i < dmamap->dm_nsegs; i++) {
4160 f = &sc->bge_rdata->bge_tx_ring[frag];
4161 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
4162 break;
4163 
4164 BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
4165 f->bge_len = dmamap->dm_segs[i].ds_len;
4166 
4167 /*
4168 * For 5751 and follow-ons doing TSO, we must turn off
4169 * the checksum-assist flag in the tx descriptor, and
4170 * supply the ASIC-revision-specific encoding
4171 * of TSO flags and segsize.
4172 */
4173 if (use_tso) {
4174 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
4175 f->bge_rsvd = maxsegsize;
4176 f->bge_flags = csum_flags | txbd_tso_flags;
4177 } else {
4178 f->bge_rsvd = 0;
4179 f->bge_flags =
4180 (csum_flags | txbd_tso_flags) & 0x0fff;
4181 }
4182 } else {
4183 f->bge_rsvd = 0;
4184 f->bge_flags = csum_flags;
4185 }
4186 
4187 if (mtag != NULL) {
4188 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
4189 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
4190 } else {
4191 f->bge_vlan_tag = 0;
4192 }
4193 cur = frag;
4194 BGE_INC(frag, BGE_TX_RING_CNT);
4195 }
4196 
4197 if (i < dmamap->dm_nsegs) {
4198 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
4199 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
4200 goto fail_unload;
4201 }
4202 
4203 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4204 BUS_DMASYNC_PREWRITE);
4205 
4206 if (frag == sc->bge_tx_saved_considx) {
4207 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
4208 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
4209 
4210 goto fail_unload;
4211 }
4212 
4213 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4214 sc->bge_cdata.bge_tx_chain[cur] = m_head;
4215 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
4216 sc->txdma[cur] = dma;
4217 sc->bge_txcnt += dmamap->dm_nsegs;
4218 
4219 *txidx = frag;
4220 
4221 return 0;
4222 
4223 fail_unload:
4224 bus_dmamap_unload(sc->bge_dmatag, dmamap);
4225 
4226 return ENOBUFS;
4227 }
4228 
4229 /*
4230 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4231 * to the mbuf data regions directly in the transmit descriptors.
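* (bge_start() below hands each packet to bge_encap(), which loads
* the chain with bus_dmamap_load_mbuf() and emits one descriptor per
* DMA segment.)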
4232 */ 4233 static void 4234 bge_start(struct ifnet *ifp) 4235 { 4236 struct bge_softc *sc; 4237 struct mbuf *m_head = NULL; 4238 uint32_t prodidx; 4239 int pkts = 0; 4240 4241 sc = ifp->if_softc; 4242 4243 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 4244 return; 4245 4246 prodidx = sc->bge_tx_prodidx; 4247 4248 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 4249 IFQ_POLL(&ifp->if_snd, m_head); 4250 if (m_head == NULL) 4251 break; 4252 4253 #if 0 4254 /* 4255 * XXX 4256 * safety overkill. If this is a fragmented packet chain 4257 * with delayed TCP/UDP checksums, then only encapsulate 4258 * it if we have enough descriptors to handle the entire 4259 * chain at once. 4260 * (paranoia -- may not actually be needed) 4261 */ 4262 if (m_head->m_flags & M_FIRSTFRAG && 4263 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 4264 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 4265 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) { 4266 ifp->if_flags |= IFF_OACTIVE; 4267 break; 4268 } 4269 } 4270 #endif 4271 4272 /* 4273 * Pack the data into the transmit ring. If we 4274 * don't have room, set the OACTIVE flag and wait 4275 * for the NIC to drain the ring. 4276 */ 4277 if (bge_encap(sc, m_head, &prodidx)) { 4278 ifp->if_flags |= IFF_OACTIVE; 4279 break; 4280 } 4281 4282 /* now we are committed to transmit the packet */ 4283 IFQ_DEQUEUE(&ifp->if_snd, m_head); 4284 pkts++; 4285 4286 /* 4287 * If there's a BPF listener, bounce a copy of this frame 4288 * to him. 4289 */ 4290 bpf_mtap(ifp, m_head); 4291 } 4292 if (pkts == 0) 4293 return; 4294 4295 /* Transmit */ 4296 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 4297 /* 5700 b2 errata */ 4298 if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX) 4299 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 4300 4301 sc->bge_tx_prodidx = prodidx; 4302 4303 /* 4304 * Set a timeout in case the chip goes out to lunch. 4305 */ 4306 ifp->if_timer = 5; 4307 } 4308 4309 static int 4310 bge_init(struct ifnet *ifp) 4311 { 4312 struct bge_softc *sc = ifp->if_softc; 4313 const uint16_t *m; 4314 int s, error = 0; 4315 4316 s = splnet(); 4317 4318 ifp = &sc->ethercom.ec_if; 4319 4320 /* Cancel pending I/O and flush buffers. */ 4321 bge_stop(ifp, 0); 4322 4323 bge_stop_fw(sc); 4324 bge_sig_pre_reset(sc, BGE_RESET_START); 4325 bge_reset(sc); 4326 bge_sig_legacy(sc, BGE_RESET_START); 4327 bge_sig_post_reset(sc, BGE_RESET_START); 4328 4329 bge_chipinit(sc); 4330 4331 /* 4332 * Init the various state machines, ring 4333 * control blocks and firmware. 4334 */ 4335 error = bge_blockinit(sc); 4336 if (error != 0) { 4337 aprint_error_dev(sc->bge_dev, "initialization error %d\n", 4338 error); 4339 splx(s); 4340 return error; 4341 } 4342 4343 ifp = &sc->ethercom.ec_if; 4344 4345 /* Specify MTU. */ 4346 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 4347 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 4348 4349 /* Load our MAC address. */ 4350 m = (const uint16_t *)&(CLLADDR(ifp->if_sadl)[0]); 4351 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 4352 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 4353 4354 /* Enable or disable promiscuous mode as needed. */ 4355 if (ifp->if_flags & IFF_PROMISC) 4356 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4357 else 4358 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4359 4360 /* Program multicast filter. */ 4361 bge_setmulti(sc); 4362 4363 /* Init RX ring. */ 4364 bge_init_rx_ring_std(sc); 4365 4366 /* 4367 * Workaround for a bug in 5705 ASIC rev A0. 
Poll the NIC's
4368 * memory to ensure that the chip has in fact read the first
4369 * entry of the ring.
4370 */
4371 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
4372 uint32_t v, i;
4373 for (i = 0; i < 10; i++) {
4374 DELAY(20);
4375 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
4376 if (v == (MCLBYTES - ETHER_ALIGN))
4377 break;
4378 }
4379 if (i == 10)
4380 aprint_error_dev(sc->bge_dev,
4381 "5705 A0 chip failed to load RX ring\n");
4382 }
4383 
4384 /* Init jumbo RX ring. */
4385 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
4386 bge_init_rx_ring_jumbo(sc);
4387 
4388 /* Init our RX return ring index */
4389 sc->bge_rx_saved_considx = 0;
4390 
4391 /* Init TX ring. */
4392 bge_init_tx_ring(sc);
4393 
4394 /* Turn on transmitter */
4395 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4396 
4397 /* Turn on receiver */
4398 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4399 
4400 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4401 
4402 /* Tell firmware we're alive. */
4403 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4404 
4405 /* Enable host interrupts. */
4406 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4407 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4408 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4409 
4410 if ((error = bge_ifmedia_upd(ifp)) != 0)
4411 goto out;
4412 
4413 ifp->if_flags |= IFF_RUNNING;
4414 ifp->if_flags &= ~IFF_OACTIVE;
4415 
4416 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
4417 
4418 out:
4419 sc->bge_if_flags = ifp->if_flags;
4420 splx(s);
4421 
4422 return error;
4423 }
4424 
4425 /*
4426 * Set media options.
4427 */
4428 static int
4429 bge_ifmedia_upd(struct ifnet *ifp)
4430 {
4431 struct bge_softc *sc = ifp->if_softc;
4432 struct mii_data *mii = &sc->bge_mii;
4433 struct ifmedia *ifm = &sc->bge_ifmedia;
4434 int rc;
4435 
4436 /* If this is a 1000baseX NIC, enable the TBI port. */
4437 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4438 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4439 return EINVAL;
4440 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4441 case IFM_AUTO:
4442 /*
4443 * The BCM5704 ASIC appears to have a special
4444 * mechanism for programming the autoneg
4445 * advertisement registers in TBI mode.
4446 */
4447 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
4448 uint32_t sgdig;
4449 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
4450 if (sgdig & BGE_SGDIGSTS_DONE) {
4451 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
4452 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
4453 sgdig |= BGE_SGDIGCFG_AUTO |
4454 BGE_SGDIGCFG_PAUSE_CAP |
4455 BGE_SGDIGCFG_ASYM_PAUSE;
4456 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
4457 sgdig | BGE_SGDIGCFG_SEND);
4458 DELAY(5);
4459 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
4460 }
4461 }
4462 break;
4463 case IFM_1000_SX:
4464 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4465 BGE_CLRBIT(sc, BGE_MAC_MODE,
4466 BGE_MACMODE_HALF_DUPLEX);
4467 } else {
4468 BGE_SETBIT(sc, BGE_MAC_MODE,
4469 BGE_MACMODE_HALF_DUPLEX);
4470 }
4471 break;
4472 default:
4473 return EINVAL;
4474 }
4475 /* XXX 802.3x flow control for 1000BASE-SX */
4476 return 0;
4477 }
4478 
4479 BGE_STS_SETBIT(sc, BGE_STS_LINK_EVT);
4480 if ((rc = mii_mediachg(mii)) == ENXIO)
4481 return 0;
4482 
4483 /*
4484 * Force an interrupt so that we will call bge_link_upd
4485 * if needed and clear any pending link state attention.
4486 * Without this we do not get any further interrupts
4487 * for link state changes and thus will never bring the link up
4488 * or be able to send in bge_start.
The only way to get 4489 * things working was to receive a packet and get a RX intr. 4490 */ 4491 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || 4492 sc->bge_flags & BGE_IS_5788) 4493 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 4494 else 4495 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4496 4497 return rc; 4498 } 4499 4500 /* 4501 * Report current media status. 4502 */ 4503 static void 4504 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 4505 { 4506 struct bge_softc *sc = ifp->if_softc; 4507 struct mii_data *mii = &sc->bge_mii; 4508 4509 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4510 ifmr->ifm_status = IFM_AVALID; 4511 ifmr->ifm_active = IFM_ETHER; 4512 if (CSR_READ_4(sc, BGE_MAC_STS) & 4513 BGE_MACSTAT_TBI_PCS_SYNCHED) 4514 ifmr->ifm_status |= IFM_ACTIVE; 4515 ifmr->ifm_active |= IFM_1000_SX; 4516 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 4517 ifmr->ifm_active |= IFM_HDX; 4518 else 4519 ifmr->ifm_active |= IFM_FDX; 4520 return; 4521 } 4522 4523 mii_pollstat(mii); 4524 ifmr->ifm_status = mii->mii_media_status; 4525 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) | 4526 sc->bge_flowflags; 4527 } 4528 4529 static int 4530 bge_ifflags_cb(struct ethercom *ec) 4531 { 4532 struct ifnet *ifp = &ec->ec_if; 4533 struct bge_softc *sc = ifp->if_softc; 4534 int change = ifp->if_flags ^ sc->bge_if_flags; 4535 4536 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 4537 return ENETRESET; 4538 else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0) 4539 return 0; 4540 4541 if ((ifp->if_flags & IFF_PROMISC) == 0) 4542 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4543 else 4544 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 4545 4546 bge_setmulti(sc); 4547 4548 sc->bge_if_flags = ifp->if_flags; 4549 return 0; 4550 } 4551 4552 static int 4553 bge_ioctl(struct ifnet *ifp, u_long command, void *data) 4554 { 4555 struct bge_softc *sc = ifp->if_softc; 4556 struct ifreq *ifr = (struct ifreq *) data; 4557 int s, error = 0; 4558 struct mii_data *mii; 4559 4560 s = splnet(); 4561 4562 switch (command) { 4563 case SIOCSIFMEDIA: 4564 /* XXX Flow control is not supported for 1000BASE-SX */ 4565 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4566 ifr->ifr_media &= ~IFM_ETH_FMASK; 4567 sc->bge_flowflags = 0; 4568 } 4569 4570 /* Flow control requires full-duplex mode. */ 4571 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 4572 (ifr->ifr_media & IFM_FDX) == 0) { 4573 ifr->ifr_media &= ~IFM_ETH_FMASK; 4574 } 4575 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 4576 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 4577 /* We can do both TXPAUSE and RXPAUSE. 
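* so advertise both when the user asks for IFM_FLOW.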
*/
4578 ifr->ifr_media |=
4579 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4580 }
4581 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4582 }
4583 /* FALLTHROUGH */
4584 case SIOCGIFMEDIA:
4585 if (sc->bge_flags & BGE_PHY_FIBER_TBI) {
4586 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4587 command);
4588 } else {
4589 mii = &sc->bge_mii;
4590 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4591 command);
4592 }
4593 break;
4594 default:
4595 if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
4596 break;
4597 
4598 error = 0;
4599 
4600 if (command != SIOCADDMULTI && command != SIOCDELMULTI)
4601 ;
4602 else if (ifp->if_flags & IFF_RUNNING)
4603 bge_setmulti(sc);
4604 break;
4605 }
4606 
4607 splx(s);
4608 
4609 return error;
4610 }
4611 
4612 static void
4613 bge_watchdog(struct ifnet *ifp)
4614 {
4615 struct bge_softc *sc;
4616 
4617 sc = ifp->if_softc;
4618 
4619 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
4620 
4621 ifp->if_flags &= ~IFF_RUNNING;
4622 bge_init(ifp);
4623 
4624 ifp->if_oerrors++;
4625 }
4626 
4627 static void
4628 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4629 {
4630 int i;
4631 
4632 BGE_CLRBIT(sc, reg, bit);
4633 
4634 for (i = 0; i < 1000; i++) {
4635 if ((CSR_READ_4(sc, reg) & bit) == 0)
4636 return;
4637 delay(100);
4638 }
4639 
4640 /*
4641 * Don't print an error when the register is BGE_SRS_MODE: that block
4642 * sometimes fails to stop in some environments (and once after boot?).
4643 */
4644 if (reg != BGE_SRS_MODE)
4645 aprint_error_dev(sc->bge_dev,
4646 "block failed to stop: reg 0x%lx, bit 0x%08x\n",
4647 (u_long)reg, bit);
4648 }
4649 
4650 /*
4651 * Stop the adapter and free any mbufs allocated to the
4652 * RX and TX lists.
4653 */
4654 static void
4655 bge_stop(struct ifnet *ifp, int disable)
4656 {
4657 struct bge_softc *sc = ifp->if_softc;
4658 
4659 callout_stop(&sc->bge_timeout);
4660 
4661 /*
4662 * Tell firmware we're shutting down.
4663 */
4664 bge_stop_fw(sc);
4665 bge_sig_pre_reset(sc, BGE_RESET_STOP);
4666 
4667 /* Disable host interrupts. */
4668 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4669 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4670 
4671 /*
4672 * Disable all of the receiver blocks
4673 */
4674 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4675 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4676 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4677 if (BGE_IS_5700_FAMILY(sc))
4678 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4679 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4680 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4681 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4682 
4683 /*
4684 * Disable all of the transmit blocks
4685 */
4686 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4687 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4688 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4689 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4690 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4691 if (BGE_IS_5700_FAMILY(sc))
4692 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4693 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4694 
4695 /*
4696 * Shut down all of the memory managers and related
4697 * state machines.
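* (host coalescing and write DMA, plus -- on 5700-family chips -- the
* MBCF block; the FTQ reset below then clears the flow-through
* queues.)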
4698 */ 4699 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4700 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4701 if (BGE_IS_5700_FAMILY(sc)) 4702 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4703 4704 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4705 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4706 4707 if (BGE_IS_5700_FAMILY(sc)) { 4708 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4709 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4710 } 4711 4712 bge_reset(sc); 4713 bge_sig_legacy(sc, BGE_RESET_STOP); 4714 bge_sig_post_reset(sc, BGE_RESET_STOP); 4715 4716 /* 4717 * Keep the ASF firmware running if up. 4718 */ 4719 if (sc->bge_asf_mode & ASF_STACKUP) 4720 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4721 else 4722 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4723 4724 /* Free the RX lists. */ 4725 bge_free_rx_ring_std(sc); 4726 4727 /* Free jumbo RX list. */ 4728 if (BGE_IS_JUMBO_CAPABLE(sc)) 4729 bge_free_rx_ring_jumbo(sc); 4730 4731 /* Free TX buffers. */ 4732 bge_free_tx_ring(sc); 4733 4734 /* 4735 * Isolate/power down the PHY. 4736 */ 4737 if (!(sc->bge_flags & BGE_PHY_FIBER_TBI)) 4738 mii_down(&sc->bge_mii); 4739 4740 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4741 4742 /* Clear MAC's link state (PHY may still have link UP). */ 4743 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4744 4745 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4746 } 4747 4748 static void 4749 bge_link_upd(struct bge_softc *sc) 4750 { 4751 struct ifnet *ifp = &sc->ethercom.ec_if; 4752 struct mii_data *mii = &sc->bge_mii; 4753 uint32_t status; 4754 int link; 4755 4756 /* Clear 'pending link event' flag */ 4757 BGE_STS_CLRBIT(sc, BGE_STS_LINK_EVT); 4758 4759 /* 4760 * Process link state changes. 4761 * Grrr. The link status word in the status block does 4762 * not work correctly on the BCM5700 rev AX and BX chips, 4763 * according to all available information. Hence, we have 4764 * to enable MII interrupts in order to properly obtain 4765 * async link changes. Unfortunately, this also means that 4766 * we have to read the MAC status register to detect link 4767 * changes, thereby adding an additional register access to 4768 * the interrupt handler. 
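* (That is the BGE_MACSTAT_MI_INTERRUPT path taken below for the
* BCM5700 case.)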
4769 */ 4770 4771 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) { 4772 status = CSR_READ_4(sc, BGE_MAC_STS); 4773 if (status & BGE_MACSTAT_MI_INTERRUPT) { 4774 mii_pollstat(mii); 4775 4776 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4777 mii->mii_media_status & IFM_ACTIVE && 4778 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4779 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4780 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4781 (!(mii->mii_media_status & IFM_ACTIVE) || 4782 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4783 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4784 4785 /* Clear the interrupt */ 4786 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4787 BGE_EVTENB_MI_INTERRUPT); 4788 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4789 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 4790 BRGPHY_INTRS); 4791 } 4792 return; 4793 } 4794 4795 if (sc->bge_flags & BGE_PHY_FIBER_TBI) { 4796 status = CSR_READ_4(sc, BGE_MAC_STS); 4797 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4798 if (!BGE_STS_BIT(sc, BGE_STS_LINK)) { 4799 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4800 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) 4801 BGE_CLRBIT(sc, BGE_MAC_MODE, 4802 BGE_MACMODE_TBI_SEND_CFGS); 4803 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4804 if_link_state_change(ifp, LINK_STATE_UP); 4805 } 4806 } else if (BGE_STS_BIT(sc, BGE_STS_LINK)) { 4807 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4808 if_link_state_change(ifp, LINK_STATE_DOWN); 4809 } 4810 /* 4811 * Discard link events for MII/GMII cards if MI auto-polling disabled. 4812 * This should not happen since mii callouts are locked now, but 4813 * we keep this check for debug. 4814 */ 4815 } else if (BGE_STS_BIT(sc, BGE_STS_AUTOPOLL)) { 4816 /* 4817 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED 4818 * bit in status word always set. Workaround this bug by 4819 * reading PHY link status directly. 4820 */ 4821 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK)? 4822 BGE_STS_LINK : 0; 4823 4824 if (BGE_STS_BIT(sc, BGE_STS_LINK) != link) { 4825 mii_pollstat(mii); 4826 4827 if (!BGE_STS_BIT(sc, BGE_STS_LINK) && 4828 mii->mii_media_status & IFM_ACTIVE && 4829 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 4830 BGE_STS_SETBIT(sc, BGE_STS_LINK); 4831 else if (BGE_STS_BIT(sc, BGE_STS_LINK) && 4832 (!(mii->mii_media_status & IFM_ACTIVE) || 4833 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) 4834 BGE_STS_CLRBIT(sc, BGE_STS_LINK); 4835 } 4836 } 4837 4838 /* Clear the attention */ 4839 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| 4840 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| 4841 BGE_MACSTAT_LINK_CHANGED); 4842 } 4843 4844 static int 4845 sysctl_bge_verify(SYSCTLFN_ARGS) 4846 { 4847 int error, t; 4848 struct sysctlnode node; 4849 4850 node = *rnode; 4851 t = *(int*)rnode->sysctl_data; 4852 node.sysctl_data = &t; 4853 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 4854 if (error || newp == NULL) 4855 return error; 4856 4857 #if 0 4858 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t, 4859 node.sysctl_num, rnode->sysctl_num)); 4860 #endif 4861 4862 if (node.sysctl_num == bge_rxthresh_nodenum) { 4863 if (t < 0 || t >= NBGE_RX_THRESH) 4864 return EINVAL; 4865 bge_update_all_threshes(t); 4866 } else 4867 return EINVAL; 4868 4869 *(int*)rnode->sysctl_data = t; 4870 4871 return 0; 4872 } 4873 4874 /* 4875 * Set up sysctl(3) MIB, hw.bge.*. 
4876 */ 4877 static void 4878 sysctl_bge_init(struct bge_softc *sc) 4879 { 4880 int rc, bge_root_num; 4881 const struct sysctlnode *node; 4882 4883 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, NULL, 4884 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 4885 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 4886 goto err; 4887 } 4888 4889 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 4890 0, CTLTYPE_NODE, "bge", 4891 SYSCTL_DESCR("BGE interface controls"), 4892 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 4893 goto err; 4894 } 4895 4896 bge_root_num = node->sysctl_num; 4897 4898 /* BGE Rx interrupt mitigation level */ 4899 if ((rc = sysctl_createv(&sc->bge_log, 0, NULL, &node, 4900 CTLFLAG_READWRITE, 4901 CTLTYPE_INT, "rx_lvl", 4902 SYSCTL_DESCR("BGE receive interrupt mitigation level"), 4903 sysctl_bge_verify, 0, 4904 &bge_rx_thresh_lvl, 4905 0, CTL_HW, bge_root_num, CTL_CREATE, 4906 CTL_EOL)) != 0) { 4907 goto err; 4908 } 4909 4910 bge_rxthresh_nodenum = node->sysctl_num; 4911 4912 return; 4913 4914 err: 4915 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 4916 } 4917 4918 #ifdef BGE_DEBUG 4919 void 4920 bge_debug_info(struct bge_softc *sc) 4921 { 4922 4923 printf("Hardware Flags:\n"); 4924 if (BGE_IS_5755_PLUS(sc)) 4925 printf(" - 5755 Plus\n"); 4926 if (BGE_IS_5750_OR_BEYOND(sc)) 4927 printf(" - 5750 Plus\n"); 4928 if (BGE_IS_5705_PLUS(sc)) 4929 printf(" - 5705 Plus\n"); 4930 if (BGE_IS_5714_FAMILY(sc)) 4931 printf(" - 5714 Family\n"); 4932 if (BGE_IS_5700_FAMILY(sc)) 4933 printf(" - 5700 Family\n"); 4934 if (sc->bge_flags & BGE_IS_5788) 4935 printf(" - 5788\n"); 4936 if (sc->bge_flags & BGE_JUMBO_CAPABLE) 4937 printf(" - Supports Jumbo Frames\n"); 4938 if (sc->bge_flags & BGE_NO_EEPROM) 4939 printf(" - No EEPROM\n"); 4940 if (sc->bge_flags & BGE_PCIX) 4941 printf(" - PCI-X Bus\n"); 4942 if (sc->bge_flags & BGE_PCIE) 4943 printf(" - PCI Express Bus\n"); 4944 if (sc->bge_flags & BGE_NO_3LED) 4945 printf(" - No 3 LEDs\n"); 4946 if (sc->bge_flags & BGE_RX_ALIGNBUG) 4947 printf(" - RX Alignment Bug\n"); 4948 if (sc->bge_flags & BGE_TSO) 4949 printf(" - TSO\n"); 4950 } 4951 #endif /* BGE_DEBUG */ 4952 4953 static int 4954 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 4955 { 4956 prop_dictionary_t dict; 4957 prop_data_t ea; 4958 4959 if ((sc->bge_flags & BGE_NO_EEPROM) == 0) 4960 return 1; 4961 4962 dict = device_properties(sc->bge_dev); 4963 ea = prop_dictionary_get(dict, "mac-address"); 4964 if (ea != NULL) { 4965 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 4966 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 4967 memcpy(ether_addr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 4968 return 0; 4969 } 4970 4971 return 1; 4972 } 4973 4974 static int 4975 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 4976 { 4977 uint32_t mac_addr; 4978 4979 mac_addr = bge_readmem_ind(sc, 0x0c14); 4980 if ((mac_addr >> 16) == 0x484b) { 4981 ether_addr[0] = (uint8_t)(mac_addr >> 8); 4982 ether_addr[1] = (uint8_t)mac_addr; 4983 mac_addr = bge_readmem_ind(sc, 0x0c18); 4984 ether_addr[2] = (uint8_t)(mac_addr >> 24); 4985 ether_addr[3] = (uint8_t)(mac_addr >> 16); 4986 ether_addr[4] = (uint8_t)(mac_addr >> 8); 4987 ether_addr[5] = (uint8_t)mac_addr; 4988 return 0; 4989 } 4990 return 1; 4991 } 4992 4993 static int 4994 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 4995 { 4996 int mac_offset = BGE_EE_MAC_OFFSET; 4997 4998 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 4999 mac_offset = BGE_EE_MAC_OFFSET_5906; 5000 5001 return 
(bge_read_nvram(sc, ether_addr, mac_offset + 2, 5002 ETHER_ADDR_LEN)); 5003 } 5004 5005 static int 5006 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 5007 { 5008 5009 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) 5010 return 1; 5011 5012 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 5013 ETHER_ADDR_LEN)); 5014 } 5015 5016 static int 5017 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 5018 { 5019 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 5020 /* NOTE: Order is critical */ 5021 bge_get_eaddr_fw, 5022 bge_get_eaddr_mem, 5023 bge_get_eaddr_nvram, 5024 bge_get_eaddr_eeprom, 5025 NULL 5026 }; 5027 const bge_eaddr_fcn_t *func; 5028 5029 for (func = bge_eaddr_funcs; *func != NULL; ++func) { 5030 if ((*func)(sc, eaddr) == 0) 5031 break; 5032 } 5033 return (*func == NULL ? ENXIO : 0); 5034 } 5035
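/*
 * (bge_get_eaddr() tries each source in bge_eaddr_funcs in order --
 * device properties, chip memory, NVRAM, then EEPROM -- and stops at
 * the first function that returns 0; ENXIO means none succeeded.)
 */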