/*	$NetBSD: if_wm.c,v 1.83 2004/10/30 18:09:22 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.83 2004/10/30 18:09:22 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		ETHER_MAX_LEN_JUMBO

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
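
/*
 * Both ring sizes above are powers of two, so an index can be advanced
 * with a simple AND of the mask rather than a modulo; e.g. with 4096
 * Tx descriptors, WM_NEXTTX() computes (4095 + 1) & 4095 == 0,
 * wrapping cleanly back to the start of the ring.
 */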

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs
#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	struct callout sc_txfifo_ch;	/* Tx FIFO stall work-around timer */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */
#define	WM_F_CSA		0x80	/* bus is CSA */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
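
/*
 * Illustrative arithmetic for the "scoot" above: an Ethernet header is
 * 14 bytes, so with sc_align_tweak == 2 the payload that follows the
 * header starts at buffer offset 14 + 2 == 16, a 4-byte boundary,
 * which keeps the IP header aligned on strict-alignment platforms.
 */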

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static __inline void
wm_set_dma_addr(__volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
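
/*
 * For example (illustrative value): with 64-bit bus addresses,
 * v == 0x0000000123456789 stores wa_low == 0x23456789 and
 * wa_high == 0x00000001; on 32-bit platforms wa_high is always 0.
 */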

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
		    0, &sc->sc_iot, &sc->sc_ioh,
		    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
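		/*
		 * The BYTECNT field encodes the maximum memory read byte
		 * count as 512 << n, i.e. 512, 1024, 2048, or 4096 bytes,
		 * which is why the message above prints 512 << bytecnt;
		 * the fixup simply clamps the command register to the
		 * maximum the device advertises in its status register.
		 */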
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n",
		    sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}
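	/*
	 * The (bus_size_t) 0x100000000ULL boundary argument above is what
	 * enforces the NOTE: bus_dmamem_alloc() will not let the clump
	 * cross a 4G boundary, so every descriptor in it shares the same
	 * upper 32 address bits.
	 */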

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);
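	/*
	 * The capacity reported here follows from the address width:
	 * a MicroWire part with 6 address bits holds 1 << 6 == 64 16-bit
	 * words, an 8-bit part 256 words, and an SPI part with 16
	 * address bits up to 65536 words.
	 */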

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));
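	/*
	 * Each EEPROM word holds two address octets, low byte first;
	 * e.g. (illustrative values) the words 0x1200, 0x5634, 0x9a78
	 * unpack to 00:12:34:56:78:9a, and the second i82546 port above
	 * would toggle the last bit to give 00:12:34:56:78:9b.
	 */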

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}
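
/*
 * A concrete example of the context fields set up in wm_tx_cksum()
 * above (illustrative values): for an untagged IPv4 packet with a
 * standard 20-byte header, offset == ETHER_HDR_LEN == 14, so IPCSS
 * is 14, IPCSO is 14 + 10 == 24 (the offset of ip_sum within
 * struct ip), and IPCSE is 14 + 20 - 1 == 33, the last byte covered
 * by the header checksum.
 */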
1474 */ 1475 static void 1476 wm_82547_txfifo_stall(void *arg) 1477 { 1478 struct wm_softc *sc = arg; 1479 int s; 1480 1481 s = splnet(); 1482 1483 if (sc->sc_txfifo_stall) { 1484 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) && 1485 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 1486 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 1487 /* 1488 * Packets have drained. Stop transmitter, reset 1489 * FIFO pointers, restart transmitter, and kick 1490 * the packet queue. 1491 */ 1492 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 1493 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 1494 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr); 1495 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr); 1496 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr); 1497 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr); 1498 CSR_WRITE(sc, WMREG_TCTL, tctl); 1499 CSR_WRITE_FLUSH(sc); 1500 1501 sc->sc_txfifo_head = 0; 1502 sc->sc_txfifo_stall = 0; 1503 wm_start(&sc->sc_ethercom.ec_if); 1504 } else { 1505 /* 1506 * Still waiting for packets to drain; try again in 1507 * another tick. 1508 */ 1509 callout_schedule(&sc->sc_txfifo_ch, 1); 1510 } 1511 } 1512 1513 splx(s); 1514 } 1515 1516 /* 1517 * wm_82547_txfifo_bugchk: 1518 * 1519 * Check for bug condition in the 82547 Tx FIFO. We need to 1520 * prevent enqueueing a packet that would wrap around the end 1521 * if the Tx FIFO ring buffer, otherwise the chip will croak. 1522 * 1523 * We do this by checking the amount of space before the end 1524 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 1525 * the Tx FIFO, wait for all remaining packets to drain, reset 1526 * the internal FIFO pointers to the beginning, and restart 1527 * transmission on the interface. 1528 */ 1529 #define WM_FIFO_HDR 0x10 1530 #define WM_82547_PAD_LEN 0x3e0 1531 static int 1532 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 1533 { 1534 int space = sc->sc_txfifo_size - sc->sc_txfifo_head; 1535 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 1536 1537 /* Just return if already stalled. */ 1538 if (sc->sc_txfifo_stall) 1539 return (1); 1540 1541 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1542 /* Stall only occurs in half-duplex mode. */ 1543 goto send_packet; 1544 } 1545 1546 if (len >= WM_82547_PAD_LEN + space) { 1547 sc->sc_txfifo_stall = 1; 1548 callout_schedule(&sc->sc_txfifo_ch, 1); 1549 return (1); 1550 } 1551 1552 send_packet: 1553 sc->sc_txfifo_head += len; 1554 if (sc->sc_txfifo_head >= sc->sc_txfifo_size) 1555 sc->sc_txfifo_head -= sc->sc_txfifo_size; 1556 1557 return (0); 1558 } 1559 1560 /* 1561 * wm_start: [ifnet interface function] 1562 * 1563 * Start packet transmission on the interface. 1564 */ 1565 static void 1566 wm_start(struct ifnet *ifp) 1567 { 1568 struct wm_softc *sc = ifp->if_softc; 1569 struct mbuf *m0; 1570 #if 0 /* XXXJRT */ 1571 struct m_tag *mtag; 1572 #endif 1573 struct wm_txsoft *txs; 1574 bus_dmamap_t dmamap; 1575 int error, nexttx, lasttx = -1, ofree, seg, segs_needed; 1576 bus_addr_t curaddr; 1577 bus_size_t seglen, curlen; 1578 uint32_t cksumcmd; 1579 uint8_t cksumfields; 1580 1581 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1582 return; 1583 1584 /* 1585 * Remember the previous number of free descriptors. 1586 */ 1587 ofree = sc->sc_txfree; 1588 1589 /* 1590 * Loop through the send queue, setting up transmit descriptors 1591 * until we drain the queue, or use up all available transmit 1592 * descriptors. 1593 */ 1594 for (;;) { 1595 /* Grab a packet off the queue. 

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    sc->sc_dev.dv_xname));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08x, "
				    "len 0x%04x\n",
				    sc->sc_dev.dv_xname, nexttx,
				    curaddr & 0xffffffffU, curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
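		/*
		 * WTX_CMD_EOP marks the final descriptor of the frame,
		 * WTX_CMD_IFCS has the MAC append the Ethernet FCS, and
		 * WTX_CMD_RS requests descriptor status write-back so
		 * that wm_txintr() can later see WTX_ST_DD set.
		 */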
*/ 1827 if (ifp->if_bpf) 1828 bpf_mtap(ifp->if_bpf, m0); 1829 #endif /* NBPFILTER > 0 */ 1830 } 1831 1832 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 1833 /* No more slots; notify upper layer. */ 1834 ifp->if_flags |= IFF_OACTIVE; 1835 } 1836 1837 if (sc->sc_txfree != ofree) { 1838 /* Set a watchdog timer in case the chip flakes out. */ 1839 ifp->if_timer = 5; 1840 } 1841 } 1842 1843 /* 1844 * wm_watchdog: [ifnet interface function] 1845 * 1846 * Watchdog timer handler. 1847 */ 1848 static void 1849 wm_watchdog(struct ifnet *ifp) 1850 { 1851 struct wm_softc *sc = ifp->if_softc; 1852 1853 /* 1854 * Since we're using delayed interrupts, sweep up 1855 * before we report an error. 1856 */ 1857 wm_txintr(sc); 1858 1859 if (sc->sc_txfree != WM_NTXDESC(sc)) { 1860 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n", 1861 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 1862 sc->sc_txnext); 1863 ifp->if_oerrors++; 1864 1865 /* Reset the interface. */ 1866 (void) wm_init(ifp); 1867 } 1868 1869 /* Try to get more packets going. */ 1870 wm_start(ifp); 1871 } 1872 1873 /* 1874 * wm_ioctl: [ifnet interface function] 1875 * 1876 * Handle control requests from the operator. 1877 */ 1878 static int 1879 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1880 { 1881 struct wm_softc *sc = ifp->if_softc; 1882 struct ifreq *ifr = (struct ifreq *) data; 1883 int s, error; 1884 1885 s = splnet(); 1886 1887 switch (cmd) { 1888 case SIOCSIFMEDIA: 1889 case SIOCGIFMEDIA: 1890 /* Flow control requires full-duplex mode. */ 1891 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1892 (ifr->ifr_media & IFM_FDX) == 0) 1893 ifr->ifr_media &= ~IFM_ETH_FMASK; 1894 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1895 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1896 /* We can do both TXPAUSE and RXPAUSE. */ 1897 ifr->ifr_media |= 1898 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1899 } 1900 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1901 } 1902 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1903 break; 1904 default: 1905 error = ether_ioctl(ifp, cmd, data); 1906 if (error == ENETRESET) { 1907 /* 1908 * Multicast list has changed; set the hardware filter 1909 * accordingly. 1910 */ 1911 if (ifp->if_flags & IFF_RUNNING) 1912 wm_set_filter(sc); 1913 error = 0; 1914 } 1915 break; 1916 } 1917 1918 /* Try to get more packets going. */ 1919 wm_start(ifp); 1920 1921 splx(s); 1922 return (error); 1923 } 1924 1925 /* 1926 * wm_intr: 1927 * 1928 * Interrupt service routine. 
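 *
 * Note that on these chips the ICR is read-to-clear: each read
 * returns the set of pending interrupt causes and acknowledges
 * them in the same access. The loop below therefore just re-reads
 * ICR until none of the causes we enabled (sc_icr, the same mask
 * written to IMS in wm_init()) remain asserted.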
1929 */ 1930 static int 1931 wm_intr(void *arg) 1932 { 1933 struct wm_softc *sc = arg; 1934 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1935 uint32_t icr; 1936 int wantinit, handled = 0; 1937 1938 for (wantinit = 0; wantinit == 0;) { 1939 icr = CSR_READ(sc, WMREG_ICR); 1940 if ((icr & sc->sc_icr) == 0) 1941 break; 1942 1943 #if 0 /*NRND > 0*/ 1944 if (RND_ENABLED(&sc->rnd_source)) 1945 rnd_add_uint32(&sc->rnd_source, icr); 1946 #endif 1947 1948 handled = 1; 1949 1950 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 1951 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 1952 DPRINTF(WM_DEBUG_RX, 1953 ("%s: RX: got Rx intr 0x%08x\n", 1954 sc->sc_dev.dv_xname, 1955 icr & (ICR_RXDMT0|ICR_RXT0))); 1956 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 1957 } 1958 #endif 1959 wm_rxintr(sc); 1960 1961 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 1962 if (icr & ICR_TXDW) { 1963 DPRINTF(WM_DEBUG_TX, 1964 ("%s: TX: got TXDW interrupt\n", 1965 sc->sc_dev.dv_xname)); 1966 WM_EVCNT_INCR(&sc->sc_ev_txdw); 1967 } 1968 #endif 1969 wm_txintr(sc); 1970 1971 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 1972 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 1973 wm_linkintr(sc, icr); 1974 } 1975 1976 if (icr & ICR_RXO) { 1977 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname); 1978 wantinit = 1; 1979 } 1980 } 1981 1982 if (handled) { 1983 if (wantinit) 1984 wm_init(ifp); 1985 1986 /* Try to get more packets going. */ 1987 wm_start(ifp); 1988 } 1989 1990 return (handled); 1991 } 1992 1993 /* 1994 * wm_txintr: 1995 * 1996 * Helper; handle transmit interrupts. 1997 */ 1998 static void 1999 wm_txintr(struct wm_softc *sc) 2000 { 2001 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2002 struct wm_txsoft *txs; 2003 uint8_t status; 2004 int i; 2005 2006 ifp->if_flags &= ~IFF_OACTIVE; 2007 2008 /* 2009 * Go through the Tx list and free mbufs for those 2010 * frames which have been transmitted. 2011 */ 2012 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2013 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2014 txs = &sc->sc_txsoft[i]; 2015 2016 DPRINTF(WM_DEBUG_TX, 2017 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i)); 2018 2019 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2020 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2021 2022 status = 2023 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2024 if ((status & WTX_ST_DD) == 0) { 2025 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2026 BUS_DMASYNC_PREREAD); 2027 break; 2028 } 2029 2030 DPRINTF(WM_DEBUG_TX, 2031 ("%s: TX: job %d done: descs %d..%d\n", 2032 sc->sc_dev.dv_xname, i, txs->txs_firstdesc, 2033 txs->txs_lastdesc)); 2034 2035 /* 2036 * XXX We should probably be using the statistics 2037 * XXX registers, but I don't know if they exist 2038 * XXX on chips before the i82544. 2039 */ 2040 2041 #ifdef WM_EVENT_COUNTERS 2042 if (status & WTX_ST_TU) 2043 WM_EVCNT_INCR(&sc->sc_ev_tu); 2044 #endif /* WM_EVENT_COUNTERS */ 2045 2046 if (status & (WTX_ST_EC|WTX_ST_LC)) { 2047 ifp->if_oerrors++; 2048 if (status & WTX_ST_LC) 2049 printf("%s: late collision\n", 2050 sc->sc_dev.dv_xname); 2051 else if (status & WTX_ST_EC) { 2052 ifp->if_collisions += 16; 2053 printf("%s: excessive collisions\n", 2054 sc->sc_dev.dv_xname); 2055 } 2056 } else 2057 ifp->if_opackets++; 2058 2059 sc->sc_txfree += txs->txs_ndesc; 2060 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 2061 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2062 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2063 m_freem(txs->txs_mbuf); 2064 txs->txs_mbuf = NULL; 2065 } 2066 2067 /* Update the dirty transmit buffer pointer. 
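 * Jobs in the range [txsdirty, txsnext) are still owned by the chip;
 * everything before txsdirty has been reclaimed by the loop above.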
*/ 2068 sc->sc_txsdirty = i; 2069 DPRINTF(WM_DEBUG_TX, 2070 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i)); 2071 2072 /* 2073 * If there are no more pending transmissions, cancel the watchdog 2074 * timer. 2075 */ 2076 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 2077 ifp->if_timer = 0; 2078 } 2079 2080 /* 2081 * wm_rxintr: 2082 * 2083 * Helper; handle receive interrupts. 2084 */ 2085 static void 2086 wm_rxintr(struct wm_softc *sc) 2087 { 2088 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2089 struct wm_rxsoft *rxs; 2090 struct mbuf *m; 2091 int i, len; 2092 uint8_t status, errors; 2093 2094 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 2095 rxs = &sc->sc_rxsoft[i]; 2096 2097 DPRINTF(WM_DEBUG_RX, 2098 ("%s: RX: checking descriptor %d\n", 2099 sc->sc_dev.dv_xname, i)); 2100 2101 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2102 2103 status = sc->sc_rxdescs[i].wrx_status; 2104 errors = sc->sc_rxdescs[i].wrx_errors; 2105 len = le16toh(sc->sc_rxdescs[i].wrx_len); 2106 2107 if ((status & WRX_ST_DD) == 0) { 2108 /* 2109 * We have processed all of the receive descriptors. 2110 */ 2111 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 2112 break; 2113 } 2114 2115 if (__predict_false(sc->sc_rxdiscard)) { 2116 DPRINTF(WM_DEBUG_RX, 2117 ("%s: RX: discarding contents of descriptor %d\n", 2118 sc->sc_dev.dv_xname, i)); 2119 WM_INIT_RXDESC(sc, i); 2120 if (status & WRX_ST_EOP) { 2121 /* Reset our state. */ 2122 DPRINTF(WM_DEBUG_RX, 2123 ("%s: RX: resetting rxdiscard -> 0\n", 2124 sc->sc_dev.dv_xname)); 2125 sc->sc_rxdiscard = 0; 2126 } 2127 continue; 2128 } 2129 2130 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2131 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2132 2133 m = rxs->rxs_mbuf; 2134 2135 /* 2136 * Add a new receive buffer to the ring. 2137 */ 2138 if (wm_add_rxbuf(sc, i) != 0) { 2139 /* 2140 * Failed, throw away what we've done so 2141 * far, and discard the rest of the packet. 2142 */ 2143 ifp->if_ierrors++; 2144 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2145 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2146 WM_INIT_RXDESC(sc, i); 2147 if ((status & WRX_ST_EOP) == 0) 2148 sc->sc_rxdiscard = 1; 2149 if (sc->sc_rxhead != NULL) 2150 m_freem(sc->sc_rxhead); 2151 WM_RXCHAIN_RESET(sc); 2152 DPRINTF(WM_DEBUG_RX, 2153 ("%s: RX: Rx buffer allocation failed, " 2154 "dropping packet%s\n", sc->sc_dev.dv_xname, 2155 sc->sc_rxdiscard ? " (discard)" : "")); 2156 continue; 2157 } 2158 2159 WM_RXCHAIN_LINK(sc, m); 2160 2161 m->m_len = len; 2162 2163 DPRINTF(WM_DEBUG_RX, 2164 ("%s: RX: buffer at %p len %d\n", 2165 sc->sc_dev.dv_xname, m->m_data, len)); 2166 2167 /* 2168 * If this is not the end of the packet, keep 2169 * looking. 2170 */ 2171 if ((status & WRX_ST_EOP) == 0) { 2172 sc->sc_rxlen += len; 2173 DPRINTF(WM_DEBUG_RX, 2174 ("%s: RX: not yet EOP, rxlen -> %d\n", 2175 sc->sc_dev.dv_xname, sc->sc_rxlen)); 2176 continue; 2177 } 2178 2179 /* 2180 * Okay, we have the entire packet now... 2181 */ 2182 *sc->sc_rxtailp = NULL; 2183 m = sc->sc_rxhead; 2184 len += sc->sc_rxlen; 2185 2186 WM_RXCHAIN_RESET(sc); 2187 2188 DPRINTF(WM_DEBUG_RX, 2189 ("%s: RX: have entire packet, len -> %d\n", 2190 sc->sc_dev.dv_xname, len)); 2191 2192 /* 2193 * If an error occurred, update stats and drop the packet. 
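 *
 * The bits checked below are, per their WRX_ER_* names, CRC,
 * symbol, and sequence errors (each of which gets its own console
 * message) plus, presumably, carrier-extension and RX data errors.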
2194 */ 2195 if (errors & 2196 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 2197 ifp->if_ierrors++; 2198 if (errors & WRX_ER_SE) 2199 printf("%s: symbol error\n", 2200 sc->sc_dev.dv_xname); 2201 else if (errors & WRX_ER_SEQ) 2202 printf("%s: receive sequence error\n", 2203 sc->sc_dev.dv_xname); 2204 else if (errors & WRX_ER_CE) 2205 printf("%s: CRC error\n", 2206 sc->sc_dev.dv_xname); 2207 m_freem(m); 2208 continue; 2209 } 2210 2211 /* 2212 * No errors. Receive the packet. 2213 * 2214 * Note, we have configured the chip to include the 2215 * CRC with every packet. 2216 */ 2217 m->m_flags |= M_HASFCS; 2218 m->m_pkthdr.rcvif = ifp; 2219 m->m_pkthdr.len = len; 2220 2221 #if 0 /* XXXJRT */ 2222 /* 2223 * If VLANs are enabled, VLAN packets have been unwrapped 2224 * for us. Associate the tag with the packet. 2225 */ 2226 if (sc->sc_ethercom.ec_nvlans != 0 && 2227 (status & WRX_ST_VP) != 0) { 2228 struct m_tag *vtag; 2229 2230 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int), 2231 M_NOWAIT); 2232 if (vtag == NULL) { 2233 ifp->if_ierrors++; 2234 printf("%s: unable to allocate VLAN tag\n", 2235 sc->sc_dev.dv_xname); 2236 m_freem(m); 2237 continue; 2238 } 2239 2240 *(u_int *)(vtag + 1) = 2241 le16toh(sc->sc_rxdescs[i].wrx_special); 2242 } 2243 #endif /* XXXJRT */ 2244 2245 /* 2246 * Set up checksum info for this packet. 2247 */ 2248 if (status & WRX_ST_IPCS) { 2249 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 2250 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2251 if (errors & WRX_ER_IPE) 2252 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2253 } 2254 if (status & WRX_ST_TCPCS) { 2255 /* 2256 * Note: we don't know if this was TCP or UDP, 2257 * so we just set both bits, and expect the 2258 * upper layers to deal. 2259 */ 2260 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 2261 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4; 2262 if (errors & WRX_ER_TCPE) 2263 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 2264 } 2265 2266 ifp->if_ipackets++; 2267 2268 #if NBPFILTER > 0 2269 /* Pass this up to any BPF listeners. */ 2270 if (ifp->if_bpf) 2271 bpf_mtap(ifp->if_bpf, m); 2272 #endif /* NBPFILTER > 0 */ 2273 2274 /* Pass it on. */ 2275 (*ifp->if_input)(ifp, m); 2276 } 2277 2278 /* Update the receive pointer. */ 2279 sc->sc_rxptr = i; 2280 2281 DPRINTF(WM_DEBUG_RX, 2282 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i)); 2283 } 2284 2285 /* 2286 * wm_linkintr: 2287 * 2288 * Helper; handle link interrupts. 2289 */ 2290 static void 2291 wm_linkintr(struct wm_softc *sc, uint32_t icr) 2292 { 2293 uint32_t status; 2294 2295 /* 2296 * If we get a link status interrupt on a 1000BASE-T 2297 * device, just fall into the normal MII tick path. 2298 */ 2299 if (sc->sc_flags & WM_F_HAS_MII) { 2300 if (icr & ICR_LSC) { 2301 DPRINTF(WM_DEBUG_LINK, 2302 ("%s: LINK: LSC -> mii_tick\n", 2303 sc->sc_dev.dv_xname)); 2304 mii_tick(&sc->sc_mii); 2305 } else if (icr & ICR_RXSEQ) { 2306 DPRINTF(WM_DEBUG_LINK, 2307 ("%s: LINK Receive sequence error\n", 2308 sc->sc_dev.dv_xname)); 2309 } 2310 return; 2311 } 2312 2313 /* 2314 * If we are now receiving /C/, check for link again in 2315 * a couple of link clock ticks. 2316 */ 2317 if (icr & ICR_RXCFG) { 2318 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 2319 sc->sc_dev.dv_xname)); 2320 sc->sc_tbi_anstate = 2; 2321 } 2322 2323 if (icr & ICR_LSC) { 2324 status = CSR_READ(sc, WMREG_STATUS); 2325 if (status & STATUS_LU) { 2326 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 2327 sc->sc_dev.dv_xname, 2328 (status & STATUS_FD) ? 
"FDX" : "HDX")); 2329 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 2330 sc->sc_fcrtl &= ~FCRTL_XONE; 2331 if (status & STATUS_FD) 2332 sc->sc_tctl |= 2333 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2334 else 2335 sc->sc_tctl |= 2336 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 2337 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 2338 sc->sc_fcrtl |= FCRTL_XONE; 2339 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2340 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 2341 WMREG_OLD_FCRTL : WMREG_FCRTL, 2342 sc->sc_fcrtl); 2343 sc->sc_tbi_linkup = 1; 2344 } else { 2345 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 2346 sc->sc_dev.dv_xname)); 2347 sc->sc_tbi_linkup = 0; 2348 } 2349 sc->sc_tbi_anstate = 2; 2350 wm_tbi_set_linkled(sc); 2351 } else if (icr & ICR_RXSEQ) { 2352 DPRINTF(WM_DEBUG_LINK, 2353 ("%s: LINK: Receive sequence error\n", 2354 sc->sc_dev.dv_xname)); 2355 } 2356 } 2357 2358 /* 2359 * wm_tick: 2360 * 2361 * One second timer, used to check link status, sweep up 2362 * completed transmit jobs, etc. 2363 */ 2364 static void 2365 wm_tick(void *arg) 2366 { 2367 struct wm_softc *sc = arg; 2368 int s; 2369 2370 s = splnet(); 2371 2372 if (sc->sc_type >= WM_T_82542_2_1) { 2373 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2374 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2375 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2376 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2377 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2378 } 2379 2380 if (sc->sc_flags & WM_F_HAS_MII) 2381 mii_tick(&sc->sc_mii); 2382 else 2383 wm_tbi_check_link(sc); 2384 2385 splx(s); 2386 2387 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2388 } 2389 2390 /* 2391 * wm_reset: 2392 * 2393 * Reset the i82542 chip. 2394 */ 2395 static void 2396 wm_reset(struct wm_softc *sc) 2397 { 2398 int i; 2399 2400 /* 2401 * Allocate on-chip memory according to the MTU size. 2402 * The Packet Buffer Allocation register must be written 2403 * before the chip is reset. 2404 */ 2405 if (sc->sc_type < WM_T_82547) { 2406 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2407 PBA_40K : PBA_48K; 2408 } else { 2409 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2410 PBA_22K : PBA_30K; 2411 sc->sc_txfifo_head = 0; 2412 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 2413 sc->sc_txfifo_size = 2414 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 2415 sc->sc_txfifo_stall = 0; 2416 } 2417 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 2418 2419 switch (sc->sc_type) { 2420 case WM_T_82544: 2421 case WM_T_82540: 2422 case WM_T_82545: 2423 case WM_T_82546: 2424 case WM_T_82541: 2425 case WM_T_82541_2: 2426 /* 2427 * These chips have a problem with the memory-mapped 2428 * write cycle when issuing the reset, so use I/O-mapped 2429 * access, if possible. 2430 */ 2431 if (sc->sc_flags & WM_F_IOH_VALID) 2432 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 2433 else 2434 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 2435 break; 2436 2437 case WM_T_82545_3: 2438 case WM_T_82546_3: 2439 /* Use the shadow control register on these chips. */ 2440 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 2441 break; 2442 2443 default: 2444 /* Everything else can safely use the documented method. 
 */
2445 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2446 break;
2447 }
2448 delay(10000);
2449
2450 for (i = 0; i < 1000; i++) {
2451 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2452 return;
2453 delay(20);
2454 }
2455
2456 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2457 printf("%s: WARNING: reset failed to complete\n",
2458 sc->sc_dev.dv_xname);
2459 }
2460
2461 /*
2462 * wm_init: [ifnet interface function]
2463 *
2464 * Initialize the interface. Must be called at splnet().
2465 */
2466 static int
2467 wm_init(struct ifnet *ifp)
2468 {
2469 struct wm_softc *sc = ifp->if_softc;
2470 struct wm_rxsoft *rxs;
2471 int i, error = 0;
2472 uint32_t reg;
2473
2474 /*
2475 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2476 * There is a small but measurable benefit to avoiding the adjustment
2477 * of the descriptor so that the headers are aligned, for normal mtu,
2478 * on such platforms. One possibility is that the DMA itself is
2479 * slightly more efficient if the front of the entire packet (instead
2480 * of the front of the headers) is aligned.
2481 *
2482 * Note we must always set align_tweak to 0 if we are using
2483 * jumbo frames.
2484 */
2485 #ifdef __NO_STRICT_ALIGNMENT
2486 sc->sc_align_tweak = 0;
2487 #else
2488 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2489 sc->sc_align_tweak = 0;
2490 else
2491 sc->sc_align_tweak = 2;
2492 #endif /* __NO_STRICT_ALIGNMENT */
2493
2494 /* Cancel any pending I/O. */
2495 wm_stop(ifp, 0);
2496
2497 /* Reset the chip to a known state. */
2498 wm_reset(sc);
2499
2500 /* Initialize the transmit descriptor ring. */
2501 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
2502 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
2503 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2504 sc->sc_txfree = WM_NTXDESC(sc);
2505 sc->sc_txnext = 0;
2506
2507 sc->sc_txctx_ipcs = 0xffffffff;
2508 sc->sc_txctx_tucs = 0xffffffff;
2509
2510 if (sc->sc_type < WM_T_82543) {
2511 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
2512 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
2513 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
2514 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2515 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2516 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2517 } else {
2518 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
2519 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
2520 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
2521 CSR_WRITE(sc, WMREG_TDH, 0);
2522 CSR_WRITE(sc, WMREG_TDT, 0);
2523 CSR_WRITE(sc, WMREG_TIDV, 128);
2524
2525 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2526 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2527 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2528 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2529 }
2530 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2531 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2532
2533 /* Initialize the transmit job descriptors. */
2534 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
2535 sc->sc_txsoft[i].txs_mbuf = NULL;
2536 sc->sc_txsfree = WM_TXQUEUELEN(sc);
2537 sc->sc_txsnext = 0;
2538 sc->sc_txsdirty = 0;
2539
2540 /*
2541 * Initialize the receive descriptor and receive job
2542 * descriptor rings.
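 *
 * Each of the WM_NRXDESC slots either keeps the mbuf cluster it
 * already had (and merely has its descriptor rewritten in place)
 * or gets a fresh cluster from wm_add_rxbuf() below.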
2543 */ 2544 if (sc->sc_type < WM_T_82543) { 2545 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 2546 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 2547 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 2548 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 2549 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 2550 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 2551 2552 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 2553 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 2554 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 2555 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 2556 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 2557 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 2558 } else { 2559 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 2560 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 2561 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 2562 CSR_WRITE(sc, WMREG_RDH, 0); 2563 CSR_WRITE(sc, WMREG_RDT, 0); 2564 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD); 2565 } 2566 for (i = 0; i < WM_NRXDESC; i++) { 2567 rxs = &sc->sc_rxsoft[i]; 2568 if (rxs->rxs_mbuf == NULL) { 2569 if ((error = wm_add_rxbuf(sc, i)) != 0) { 2570 printf("%s: unable to allocate or map rx " 2571 "buffer %d, error = %d\n", 2572 sc->sc_dev.dv_xname, i, error); 2573 /* 2574 * XXX Should attempt to run with fewer receive 2575 * XXX buffers instead of just failing. 2576 */ 2577 wm_rxdrain(sc); 2578 goto out; 2579 } 2580 } else 2581 WM_INIT_RXDESC(sc, i); 2582 } 2583 sc->sc_rxptr = 0; 2584 sc->sc_rxdiscard = 0; 2585 WM_RXCHAIN_RESET(sc); 2586 2587 /* 2588 * Clear out the VLAN table -- we don't use it (yet). 2589 */ 2590 CSR_WRITE(sc, WMREG_VET, 0); 2591 for (i = 0; i < WM_VLAN_TABSIZE; i++) 2592 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 2593 2594 /* 2595 * Set up flow-control parameters. 2596 * 2597 * XXX Values could probably stand some tuning. 2598 */ 2599 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 2600 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 2601 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 2602 2603 sc->sc_fcrtl = FCRTL_DFLT; 2604 if (sc->sc_type < WM_T_82543) { 2605 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 2606 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 2607 } else { 2608 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 2609 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 2610 } 2611 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 2612 2613 #if 0 /* XXXJRT */ 2614 /* Deal with VLAN enables. */ 2615 if (sc->sc_ethercom.ec_nvlans != 0) 2616 sc->sc_ctrl |= CTRL_VME; 2617 else 2618 #endif /* XXXJRT */ 2619 sc->sc_ctrl &= ~CTRL_VME; 2620 2621 /* Write the control registers. */ 2622 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2623 #if 0 2624 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 2625 #endif 2626 2627 /* 2628 * Set up checksum offload parameters. 2629 */ 2630 reg = CSR_READ(sc, WMREG_RXCSUM); 2631 if (ifp->if_capenable & IFCAP_CSUM_IPv4) 2632 reg |= RXCSUM_IPOFL; 2633 else 2634 reg &= ~RXCSUM_IPOFL; 2635 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4)) 2636 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 2637 else { 2638 reg &= ~RXCSUM_TUOFL; 2639 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0) 2640 reg &= ~RXCSUM_IPOFL; 2641 } 2642 CSR_WRITE(sc, WMREG_RXCSUM, reg); 2643 2644 /* 2645 * Set up the interrupt registers. 2646 */ 2647 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 2648 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 2649 ICR_RXO | ICR_RXT0; 2650 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 2651 sc->sc_icr |= ICR_RXCFG; 2652 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 2653 2654 /* Set up the inter-packet gap. 
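 * The TIPG value was chosen when the media was initialized:
 * roughly, TIPG_1000T_DFLT for GMII parts, and TIPG_LG_DFLT
 * (TIPG_WM_DFLT on pre-82543 chips) for TBI parts.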
 */
2655 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2656
2657 #if 0 /* XXXJRT */
2658 /* Set the VLAN ethernetype. */
2659 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2660 #endif
2661
2662 /*
2663 * Set up the transmit control register; we start out with
2664 * a collision distance suitable for FDX, but update it when
2665 * we resolve the media type.
2666 */
2667 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2668 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2669 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2670
2671 /* Set the media. */
2672 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2673
2674 /*
2675 * Set up the receive control register; we actually program
2676 * the register when we set the receive filter. Use multicast
2677 * address offset type 0.
2678 *
2679 * Only the i82544 has the ability to strip the incoming
2680 * CRC, so we don't enable that feature.
2681 */
2682 sc->sc_mchash_type = 0;
2683 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2684 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2685
2686 if (MCLBYTES == 2048) {
2687 sc->sc_rctl |= RCTL_2k;
2688 } else {
2689 if (sc->sc_type >= WM_T_82543) {
2690 switch (MCLBYTES) {
2691 case 4096:
2692 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2693 break;
2694 case 8192:
2695 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2696 break;
2697 case 16384:
2698 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2699 break;
2700 default:
2701 panic("wm_init: MCLBYTES %d unsupported",
2702 MCLBYTES);
2703 break;
2704 }
2705 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2706 }
2707
2708 /* Set the receive filter. */
2709 wm_set_filter(sc);
2710
2711 /* Start the one second link check clock. */
2712 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2713
2714 /* ...all done! */
2715 ifp->if_flags |= IFF_RUNNING;
2716 ifp->if_flags &= ~IFF_OACTIVE;
2717
2718 out:
2719 if (error)
2720 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2721 return (error);
2722 }
2723
2724 /*
2725 * wm_rxdrain:
2726 *
2727 * Drain the receive queue.
2728 */
2729 static void
2730 wm_rxdrain(struct wm_softc *sc)
2731 {
2732 struct wm_rxsoft *rxs;
2733 int i;
2734
2735 for (i = 0; i < WM_NRXDESC; i++) {
2736 rxs = &sc->sc_rxsoft[i];
2737 if (rxs->rxs_mbuf != NULL) {
2738 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2739 m_freem(rxs->rxs_mbuf);
2740 rxs->rxs_mbuf = NULL;
2741 }
2742 }
2743 }
2744
2745 /*
2746 * wm_stop: [ifnet interface function]
2747 *
2748 * Stop transmission on the interface.
2749 */
2750 static void
2751 wm_stop(struct ifnet *ifp, int disable)
2752 {
2753 struct wm_softc *sc = ifp->if_softc;
2754 struct wm_txsoft *txs;
2755 int i;
2756
2757 /* Stop the one second clock. */
2758 callout_stop(&sc->sc_tick_ch);
2759
2760 /* Stop the 82547 Tx FIFO stall check timer. */
2761 if (sc->sc_type == WM_T_82547)
2762 callout_stop(&sc->sc_txfifo_ch);
2763
2764 if (sc->sc_flags & WM_F_HAS_MII) {
2765 /* Down the MII. */
2766 mii_down(&sc->sc_mii);
2767 }
2768
2769 /* Stop the transmit and receive processes. */
2770 CSR_WRITE(sc, WMREG_TCTL, 0);
2771 CSR_WRITE(sc, WMREG_RCTL, 0);
2772
2773 /* Release any queued transmit buffers. */
2774 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2775 txs = &sc->sc_txsoft[i];
2776 if (txs->txs_mbuf != NULL) {
2777 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2778 m_freem(txs->txs_mbuf);
2779 txs->txs_mbuf = NULL;
2780 }
2781 }
2782
2783 if (disable)
2784 wm_rxdrain(sc);
2785
2786 /* Mark the interface as down and cancel the watchdog timer.
*/ 2787 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2788 ifp->if_timer = 0; 2789 } 2790 2791 /* 2792 * wm_acquire_eeprom: 2793 * 2794 * Perform the EEPROM handshake required on some chips. 2795 */ 2796 static int 2797 wm_acquire_eeprom(struct wm_softc *sc) 2798 { 2799 uint32_t reg; 2800 int x; 2801 2802 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 2803 reg = CSR_READ(sc, WMREG_EECD); 2804 2805 /* Request EEPROM access. */ 2806 reg |= EECD_EE_REQ; 2807 CSR_WRITE(sc, WMREG_EECD, reg); 2808 2809 /* ..and wait for it to be granted. */ 2810 for (x = 0; x < 100; x++) { 2811 reg = CSR_READ(sc, WMREG_EECD); 2812 if (reg & EECD_EE_GNT) 2813 break; 2814 delay(5); 2815 } 2816 if ((reg & EECD_EE_GNT) == 0) { 2817 aprint_error("%s: could not acquire EEPROM GNT\n", 2818 sc->sc_dev.dv_xname); 2819 reg &= ~EECD_EE_REQ; 2820 CSR_WRITE(sc, WMREG_EECD, reg); 2821 return (1); 2822 } 2823 } 2824 2825 return (0); 2826 } 2827 2828 /* 2829 * wm_release_eeprom: 2830 * 2831 * Release the EEPROM mutex. 2832 */ 2833 static void 2834 wm_release_eeprom(struct wm_softc *sc) 2835 { 2836 uint32_t reg; 2837 2838 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 2839 reg = CSR_READ(sc, WMREG_EECD); 2840 reg &= ~EECD_EE_REQ; 2841 CSR_WRITE(sc, WMREG_EECD, reg); 2842 } 2843 } 2844 2845 /* 2846 * wm_eeprom_sendbits: 2847 * 2848 * Send a series of bits to the EEPROM. 2849 */ 2850 static void 2851 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 2852 { 2853 uint32_t reg; 2854 int x; 2855 2856 reg = CSR_READ(sc, WMREG_EECD); 2857 2858 for (x = nbits; x > 0; x--) { 2859 if (bits & (1U << (x - 1))) 2860 reg |= EECD_DI; 2861 else 2862 reg &= ~EECD_DI; 2863 CSR_WRITE(sc, WMREG_EECD, reg); 2864 delay(2); 2865 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 2866 delay(2); 2867 CSR_WRITE(sc, WMREG_EECD, reg); 2868 delay(2); 2869 } 2870 } 2871 2872 /* 2873 * wm_eeprom_recvbits: 2874 * 2875 * Receive a series of bits from the EEPROM. 2876 */ 2877 static void 2878 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 2879 { 2880 uint32_t reg, val; 2881 int x; 2882 2883 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 2884 2885 val = 0; 2886 for (x = nbits; x > 0; x--) { 2887 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 2888 delay(2); 2889 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 2890 val |= (1U << (x - 1)); 2891 CSR_WRITE(sc, WMREG_EECD, reg); 2892 delay(2); 2893 } 2894 *valp = val; 2895 } 2896 2897 /* 2898 * wm_read_eeprom_uwire: 2899 * 2900 * Read a word from the EEPROM using the MicroWire protocol. 2901 */ 2902 static int 2903 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 2904 { 2905 uint32_t reg, val; 2906 int i; 2907 2908 for (i = 0; i < wordcnt; i++) { 2909 /* Clear SK and DI. */ 2910 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 2911 CSR_WRITE(sc, WMREG_EECD, reg); 2912 2913 /* Set CHIP SELECT. */ 2914 reg |= EECD_CS; 2915 CSR_WRITE(sc, WMREG_EECD, reg); 2916 delay(2); 2917 2918 /* Shift in the READ command. */ 2919 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 2920 2921 /* Shift in address. */ 2922 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits); 2923 2924 /* Shift out the data. */ 2925 wm_eeprom_recvbits(sc, &val, 16); 2926 data[i] = val & 0xffff; 2927 2928 /* Clear CHIP SELECT. */ 2929 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 2930 CSR_WRITE(sc, WMREG_EECD, reg); 2931 delay(2); 2932 } 2933 2934 return (0); 2935 } 2936 2937 /* 2938 * wm_spi_eeprom_ready: 2939 * 2940 * Wait for a SPI EEPROM to be ready for commands. 
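 *
 * This issues RDSR (read status register) and spins until the RDY
 * bit reads back clear, giving up after SPI_MAX_RETRIES microseconds
 * (we burn roughly 5us per poll).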
2941 */
2942 static int
2943 wm_spi_eeprom_ready(struct wm_softc *sc)
2944 {
2945 uint32_t val;
2946 int usec;
2947
2948 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2949 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2950 wm_eeprom_recvbits(sc, &val, 8);
2951 if ((val & SPI_SR_RDY) == 0)
2952 break;
2953 }
2954 if (usec >= SPI_MAX_RETRIES) {
2955 aprint_error("%s: EEPROM failed to become ready\n",
2956 sc->sc_dev.dv_xname);
2957 return (1);
2958 }
2959 return (0);
2960 }
2961
2962 /*
2963 * wm_read_eeprom_spi:
2964 *
2965 * Read a word from the EEPROM using the SPI protocol.
2966 */
2967 static int
2968 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2969 {
2970 uint32_t reg, val;
2971 int i;
2972 uint8_t opc;
2973
2974 /* Clear SK and CS. */
2975 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2976 CSR_WRITE(sc, WMREG_EECD, reg);
2977 delay(2);
2978
2979 if (wm_spi_eeprom_ready(sc))
2980 return (1);
2981
2982 /* Toggle CS to flush commands. */
2983 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2984 delay(2);
2985 CSR_WRITE(sc, WMREG_EECD, reg);
2986 delay(2);
2987
2988 opc = SPI_OPC_READ;
2989 if (sc->sc_ee_addrbits == 8 && word >= 128)
2990 opc |= SPI_OPC_A8;
2991
2992 wm_eeprom_sendbits(sc, opc, 8);
2993 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2994
2995 for (i = 0; i < wordcnt; i++) {
2996 wm_eeprom_recvbits(sc, &val, 16);
2997 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2998 }
2999
3000 /* Raise CS and clear SK. */
3001 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3002 CSR_WRITE(sc, WMREG_EECD, reg);
3003 delay(2);
3004
3005 return (0);
3006 }
3007
3008 /*
3009 * wm_read_eeprom:
3010 *
3011 * Read data from the serial EEPROM.
3012 */
3013 static int
3014 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3015 {
3016 int rv;
3017
3018 if (wm_acquire_eeprom(sc))
3019 return (1);
3020
3021 if (sc->sc_flags & WM_F_EEPROM_SPI)
3022 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3023 else
3024 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3025
3026 wm_release_eeprom(sc);
3027 return (rv);
3028 }
3029
3030 /*
3031 * wm_add_rxbuf:
3032 *
3033 * Add a receive buffer to the indicated descriptor.
3034 */
3035 static int
3036 wm_add_rxbuf(struct wm_softc *sc, int idx)
3037 {
3038 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3039 struct mbuf *m;
3040 int error;
3041
3042 MGETHDR(m, M_DONTWAIT, MT_DATA);
3043 if (m == NULL)
3044 return (ENOBUFS);
3045
3046 MCLGET(m, M_DONTWAIT);
3047 if ((m->m_flags & M_EXT) == 0) {
3048 m_freem(m);
3049 return (ENOBUFS);
3050 }
3051
3052 if (rxs->rxs_mbuf != NULL)
3053 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3054
3055 rxs->rxs_mbuf = m;
3056
3057 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3058 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3059 BUS_DMA_READ|BUS_DMA_NOWAIT);
3060 if (error) {
3061 printf("%s: unable to load rx DMA map %d, error = %d\n",
3062 sc->sc_dev.dv_xname, idx, error);
3063 panic("wm_add_rxbuf"); /* XXX XXX XXX */
3064 }
3065
3066 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3067 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3068
3069 WM_INIT_RXDESC(sc, idx);
3070
3071 return (0);
3072 }
3073
3074 /*
3075 * wm_set_ral:
3076 *
3077 * Set an entry in the receive address list.
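 *
 * Each entry is a register pair: RAL_LO gets the first four bytes
 * of the address, RAL_HI the remaining two plus the Address Valid
 * bit. For example, 00:a0:c9:12:34:56 would be written as
 * (illustrative values only):
 *
 *	ral_lo = 0x12c9a000
 *	ral_hi = 0x00005634 | RAL_AV
 *
 * A NULL enaddr clears the slot.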
3078 */ 3079 static void 3080 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3081 { 3082 uint32_t ral_lo, ral_hi; 3083 3084 if (enaddr != NULL) { 3085 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 3086 (enaddr[3] << 24); 3087 ral_hi = enaddr[4] | (enaddr[5] << 8); 3088 ral_hi |= RAL_AV; 3089 } else { 3090 ral_lo = 0; 3091 ral_hi = 0; 3092 } 3093 3094 if (sc->sc_type >= WM_T_82544) { 3095 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 3096 ral_lo); 3097 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 3098 ral_hi); 3099 } else { 3100 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 3101 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 3102 } 3103 } 3104 3105 /* 3106 * wm_mchash: 3107 * 3108 * Compute the hash of the multicast address for the 4096-bit 3109 * multicast filter. 3110 */ 3111 static uint32_t 3112 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3113 { 3114 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3115 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3116 uint32_t hash; 3117 3118 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3119 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3120 3121 return (hash & 0xfff); 3122 } 3123 3124 /* 3125 * wm_set_filter: 3126 * 3127 * Set up the receive filter. 3128 */ 3129 static void 3130 wm_set_filter(struct wm_softc *sc) 3131 { 3132 struct ethercom *ec = &sc->sc_ethercom; 3133 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3134 struct ether_multi *enm; 3135 struct ether_multistep step; 3136 bus_addr_t mta_reg; 3137 uint32_t hash, reg, bit; 3138 int i; 3139 3140 if (sc->sc_type >= WM_T_82544) 3141 mta_reg = WMREG_CORDOVA_MTA; 3142 else 3143 mta_reg = WMREG_MTA; 3144 3145 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3146 3147 if (ifp->if_flags & IFF_BROADCAST) 3148 sc->sc_rctl |= RCTL_BAM; 3149 if (ifp->if_flags & IFF_PROMISC) { 3150 sc->sc_rctl |= RCTL_UPE; 3151 goto allmulti; 3152 } 3153 3154 /* 3155 * Set the station address in the first RAL slot, and 3156 * clear the remaining slots. 3157 */ 3158 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0); 3159 for (i = 1; i < WM_RAL_TABSIZE; i++) 3160 wm_set_ral(sc, NULL, i); 3161 3162 /* Clear out the multicast table. */ 3163 for (i = 0; i < WM_MC_TABSIZE; i++) 3164 CSR_WRITE(sc, mta_reg + (i << 2), 0); 3165 3166 ETHER_FIRST_MULTI(step, ec, enm); 3167 while (enm != NULL) { 3168 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3169 /* 3170 * We must listen to a range of multicast addresses. 3171 * For now, just accept all multicasts, rather than 3172 * trying to set only those filter bits needed to match 3173 * the range. (At this time, the only use of address 3174 * ranges is for IP multicast routing, for which the 3175 * range is big enough to require all bits set.) 3176 */ 3177 goto allmulti; 3178 } 3179 3180 hash = wm_mchash(sc, enm->enm_addrlo); 3181 3182 reg = (hash >> 5) & 0x7f; 3183 bit = hash & 0x1f; 3184 3185 hash = CSR_READ(sc, mta_reg + (reg << 2)); 3186 hash |= 1U << bit; 3187 3188 /* XXX Hardware bug?? 
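 * The low 5 bits of the hash select a bit and the high 7 bits select
 * one of the 128 32-bit table words (128 * 32 = 4096 filter bits).
 * The 82544 appears to mishandle a write to an odd-numbered MTA word
 * unless the even-numbered word below it is immediately rewritten,
 * so for that chip we save and restore the neighboring word around
 * the update.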
 */
3189 if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
3190 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3191 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3192 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3193 } else
3194 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3195
3196 ETHER_NEXT_MULTI(step, enm);
3197 }
3198
3199 ifp->if_flags &= ~IFF_ALLMULTI;
3200 goto setit;
3201
3202 allmulti:
3203 ifp->if_flags |= IFF_ALLMULTI;
3204 sc->sc_rctl |= RCTL_MPE;
3205
3206 setit:
3207 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3208 }
3209
3210 /*
3211 * wm_tbi_mediainit:
3212 *
3213 * Initialize media for use on 1000BASE-X devices.
3214 */
3215 static void
3216 wm_tbi_mediainit(struct wm_softc *sc)
3217 {
3218 const char *sep = "";
3219
3220 if (sc->sc_type < WM_T_82543)
3221 sc->sc_tipg = TIPG_WM_DFLT;
3222 else
3223 sc->sc_tipg = TIPG_LG_DFLT;
3224
3225 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3226 wm_tbi_mediastatus);
3227
3228 /*
3229 * SWD Pins:
3230 *
3231 * 0 = Link LED (output)
3232 * 1 = Loss Of Signal (input)
3233 */
3234 sc->sc_ctrl |= CTRL_SWDPIO(0);
3235 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3236
3237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3238
3239 #define ADD(ss, mm, dd) \
3240 do { \
3241 printf("%s%s", sep, ss); \
3242 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3243 sep = ", "; \
3244 } while (/*CONSTCOND*/0)
3245
3246 printf("%s: ", sc->sc_dev.dv_xname);
3247 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3248 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3249 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3250 printf("\n");
3251
3252 #undef ADD
3253
3254 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3255 }
3256
3257 /*
3258 * wm_tbi_mediastatus: [ifmedia interface function]
3259 *
3260 * Get the current interface media status on a 1000BASE-X device.
3261 */
3262 static void
3263 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3264 {
3265 struct wm_softc *sc = ifp->if_softc;
3266 uint32_t ctrl;
3267
3268 ifmr->ifm_status = IFM_AVALID;
3269 ifmr->ifm_active = IFM_ETHER;
3270
3271 if (sc->sc_tbi_linkup == 0) {
3272 ifmr->ifm_active |= IFM_NONE;
3273 return;
3274 }
3275
3276 ifmr->ifm_status |= IFM_ACTIVE;
3277 ifmr->ifm_active |= IFM_1000_SX;
3278 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3279 ifmr->ifm_active |= IFM_FDX;
3280 ctrl = CSR_READ(sc, WMREG_CTRL);
3281 if (ctrl & CTRL_RFCE)
3282 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3283 if (ctrl & CTRL_TFCE)
3284 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3285 }
3286
3287 /*
3288 * wm_tbi_mediachange: [ifmedia interface function]
3289 *
3290 * Set hardware to newly-selected media on a 1000BASE-X device.
3291 */
3292 static int
3293 wm_tbi_mediachange(struct ifnet *ifp)
3294 {
3295 struct wm_softc *sc = ifp->if_softc;
3296 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3297 uint32_t status;
3298 int i;
3299
3300 sc->sc_txcw = ife->ifm_data;
3301 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3302 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3303 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3304 sc->sc_txcw |= TXCW_ANE;
3305
3306 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3307 delay(10000);
3308
3309 /* NOTE: CTRL will update TFCE and RFCE automatically. */
3310
3311 sc->sc_tbi_anstate = 0;
3312
3313 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3314 /* Have signal; wait for the link to come up.
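 * SWDPIN(1) is the Loss Of Signal input, so reading it low means
 * the optics see light. We then poll STATUS_LU for up to
 * 50 * 10ms = 500ms before deciding whether the link came up.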
*/ 3315 for (i = 0; i < 50; i++) { 3316 delay(10000); 3317 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 3318 break; 3319 } 3320 3321 status = CSR_READ(sc, WMREG_STATUS); 3322 if (status & STATUS_LU) { 3323 /* Link is up. */ 3324 DPRINTF(WM_DEBUG_LINK, 3325 ("%s: LINK: set media -> link up %s\n", 3326 sc->sc_dev.dv_xname, 3327 (status & STATUS_FD) ? "FDX" : "HDX")); 3328 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3329 sc->sc_fcrtl &= ~FCRTL_XONE; 3330 if (status & STATUS_FD) 3331 sc->sc_tctl |= 3332 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3333 else 3334 sc->sc_tctl |= 3335 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3336 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 3337 sc->sc_fcrtl |= FCRTL_XONE; 3338 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3339 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3340 WMREG_OLD_FCRTL : WMREG_FCRTL, 3341 sc->sc_fcrtl); 3342 sc->sc_tbi_linkup = 1; 3343 } else { 3344 /* Link is down. */ 3345 DPRINTF(WM_DEBUG_LINK, 3346 ("%s: LINK: set media -> link down\n", 3347 sc->sc_dev.dv_xname)); 3348 sc->sc_tbi_linkup = 0; 3349 } 3350 } else { 3351 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 3352 sc->sc_dev.dv_xname)); 3353 sc->sc_tbi_linkup = 0; 3354 } 3355 3356 wm_tbi_set_linkled(sc); 3357 3358 return (0); 3359 } 3360 3361 /* 3362 * wm_tbi_set_linkled: 3363 * 3364 * Update the link LED on 1000BASE-X devices. 3365 */ 3366 static void 3367 wm_tbi_set_linkled(struct wm_softc *sc) 3368 { 3369 3370 if (sc->sc_tbi_linkup) 3371 sc->sc_ctrl |= CTRL_SWDPIN(0); 3372 else 3373 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 3374 3375 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3376 } 3377 3378 /* 3379 * wm_tbi_check_link: 3380 * 3381 * Check the link on 1000BASE-X devices. 3382 */ 3383 static void 3384 wm_tbi_check_link(struct wm_softc *sc) 3385 { 3386 uint32_t rxcw, ctrl, status; 3387 3388 if (sc->sc_tbi_anstate == 0) 3389 return; 3390 else if (sc->sc_tbi_anstate > 1) { 3391 DPRINTF(WM_DEBUG_LINK, 3392 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname, 3393 sc->sc_tbi_anstate)); 3394 sc->sc_tbi_anstate--; 3395 return; 3396 } 3397 3398 sc->sc_tbi_anstate = 0; 3399 3400 rxcw = CSR_READ(sc, WMREG_RXCW); 3401 ctrl = CSR_READ(sc, WMREG_CTRL); 3402 status = CSR_READ(sc, WMREG_STATUS); 3403 3404 if ((status & STATUS_LU) == 0) { 3405 DPRINTF(WM_DEBUG_LINK, 3406 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname)); 3407 sc->sc_tbi_linkup = 0; 3408 } else { 3409 DPRINTF(WM_DEBUG_LINK, 3410 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname, 3411 (status & STATUS_FD) ? "FDX" : "HDX")); 3412 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3413 sc->sc_fcrtl &= ~FCRTL_XONE; 3414 if (status & STATUS_FD) 3415 sc->sc_tctl |= 3416 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3417 else 3418 sc->sc_tctl |= 3419 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3420 if (ctrl & CTRL_TFCE) 3421 sc->sc_fcrtl |= FCRTL_XONE; 3422 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3423 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3424 WMREG_OLD_FCRTL : WMREG_FCRTL, 3425 sc->sc_fcrtl); 3426 sc->sc_tbi_linkup = 1; 3427 } 3428 3429 wm_tbi_set_linkled(sc); 3430 } 3431 3432 /* 3433 * wm_gmii_reset: 3434 * 3435 * Reset the PHY. 3436 */ 3437 static void 3438 wm_gmii_reset(struct wm_softc *sc) 3439 { 3440 uint32_t reg; 3441 3442 if (sc->sc_type >= WM_T_82544) { 3443 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 3444 delay(20000); 3445 3446 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3447 delay(20000); 3448 } else { 3449 /* The PHY reset pin is active-low. 
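 * On these chips the PHY reset line is wired to SWDPIN(4): we drive
 * the pin as an output, raise it (reset deasserted), drop it for
 * 10us to assert reset, then raise it again.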
*/ 3450 reg = CSR_READ(sc, WMREG_CTRL_EXT); 3451 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 3452 CTRL_EXT_SWDPIN(4)); 3453 reg |= CTRL_EXT_SWDPIO(4); 3454 3455 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 3456 delay(10); 3457 3458 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 3459 delay(10); 3460 3461 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 3462 delay(10); 3463 #if 0 3464 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 3465 #endif 3466 } 3467 } 3468 3469 /* 3470 * wm_gmii_mediainit: 3471 * 3472 * Initialize media for use on 1000BASE-T devices. 3473 */ 3474 static void 3475 wm_gmii_mediainit(struct wm_softc *sc) 3476 { 3477 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3478 3479 /* We have MII. */ 3480 sc->sc_flags |= WM_F_HAS_MII; 3481 3482 sc->sc_tipg = TIPG_1000T_DFLT; 3483 3484 /* 3485 * Let the chip set speed/duplex on its own based on 3486 * signals from the PHY. 3487 */ 3488 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE; 3489 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3490 3491 /* Initialize our media structures and probe the GMII. */ 3492 sc->sc_mii.mii_ifp = ifp; 3493 3494 if (sc->sc_type >= WM_T_82544) { 3495 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg; 3496 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg; 3497 } else { 3498 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg; 3499 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg; 3500 } 3501 sc->sc_mii.mii_statchg = wm_gmii_statchg; 3502 3503 wm_gmii_reset(sc); 3504 3505 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange, 3506 wm_gmii_mediastatus); 3507 3508 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 3509 MII_OFFSET_ANY, MIIF_DOPAUSE); 3510 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 3511 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 3512 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 3513 } else 3514 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 3515 } 3516 3517 /* 3518 * wm_gmii_mediastatus: [ifmedia interface function] 3519 * 3520 * Get the current interface media status on a 1000BASE-T device. 3521 */ 3522 static void 3523 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 3524 { 3525 struct wm_softc *sc = ifp->if_softc; 3526 3527 mii_pollstat(&sc->sc_mii); 3528 ifmr->ifm_status = sc->sc_mii.mii_media_status; 3529 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) | 3530 sc->sc_flowflags; 3531 } 3532 3533 /* 3534 * wm_gmii_mediachange: [ifmedia interface function] 3535 * 3536 * Set hardware to newly-selected media on a 1000BASE-T device. 
3537 */ 3538 static int 3539 wm_gmii_mediachange(struct ifnet *ifp) 3540 { 3541 struct wm_softc *sc = ifp->if_softc; 3542 3543 if (ifp->if_flags & IFF_UP) 3544 mii_mediachg(&sc->sc_mii); 3545 return (0); 3546 } 3547 3548 #define MDI_IO CTRL_SWDPIN(2) 3549 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 3550 #define MDI_CLK CTRL_SWDPIN(3) 3551 3552 static void 3553 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 3554 { 3555 uint32_t i, v; 3556 3557 v = CSR_READ(sc, WMREG_CTRL); 3558 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 3559 v |= MDI_DIR | CTRL_SWDPIO(3); 3560 3561 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 3562 if (data & i) 3563 v |= MDI_IO; 3564 else 3565 v &= ~MDI_IO; 3566 CSR_WRITE(sc, WMREG_CTRL, v); 3567 delay(10); 3568 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3569 delay(10); 3570 CSR_WRITE(sc, WMREG_CTRL, v); 3571 delay(10); 3572 } 3573 } 3574 3575 static uint32_t 3576 i82543_mii_recvbits(struct wm_softc *sc) 3577 { 3578 uint32_t v, i, data = 0; 3579 3580 v = CSR_READ(sc, WMREG_CTRL); 3581 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 3582 v |= CTRL_SWDPIO(3); 3583 3584 CSR_WRITE(sc, WMREG_CTRL, v); 3585 delay(10); 3586 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3587 delay(10); 3588 CSR_WRITE(sc, WMREG_CTRL, v); 3589 delay(10); 3590 3591 for (i = 0; i < 16; i++) { 3592 data <<= 1; 3593 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3594 delay(10); 3595 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 3596 data |= 1; 3597 CSR_WRITE(sc, WMREG_CTRL, v); 3598 delay(10); 3599 } 3600 3601 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3602 delay(10); 3603 CSR_WRITE(sc, WMREG_CTRL, v); 3604 delay(10); 3605 3606 return (data); 3607 } 3608 3609 #undef MDI_IO 3610 #undef MDI_DIR 3611 #undef MDI_CLK 3612 3613 /* 3614 * wm_gmii_i82543_readreg: [mii interface function] 3615 * 3616 * Read a PHY register on the GMII (i82543 version). 3617 */ 3618 static int 3619 wm_gmii_i82543_readreg(struct device *self, int phy, int reg) 3620 { 3621 struct wm_softc *sc = (void *) self; 3622 int rv; 3623 3624 i82543_mii_sendbits(sc, 0xffffffffU, 32); 3625 i82543_mii_sendbits(sc, reg | (phy << 5) | 3626 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 3627 rv = i82543_mii_recvbits(sc) & 0xffff; 3628 3629 DPRINTF(WM_DEBUG_GMII, 3630 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 3631 sc->sc_dev.dv_xname, phy, reg, rv)); 3632 3633 return (rv); 3634 } 3635 3636 /* 3637 * wm_gmii_i82543_writereg: [mii interface function] 3638 * 3639 * Write a PHY register on the GMII (i82543 version). 3640 */ 3641 static void 3642 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val) 3643 { 3644 struct wm_softc *sc = (void *) self; 3645 3646 i82543_mii_sendbits(sc, 0xffffffffU, 32); 3647 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 3648 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 3649 (MII_COMMAND_START << 30), 32); 3650 } 3651 3652 /* 3653 * wm_gmii_i82544_readreg: [mii interface function] 3654 * 3655 * Read a PHY register on the GMII. 
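 *
 * Unlike the i82543, the i82544 and later have an MDIC register
 * that runs the MII serial protocol in hardware: we deposit the
 * opcode, PHY address, and register number in one write, then poll
 * MDIC_READY for up to 100 * 10us = 1ms for the result.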
3656 */ 3657 static int 3658 wm_gmii_i82544_readreg(struct device *self, int phy, int reg) 3659 { 3660 struct wm_softc *sc = (void *) self; 3661 uint32_t mdic = 0; 3662 int i, rv; 3663 3664 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 3665 MDIC_REGADD(reg)); 3666 3667 for (i = 0; i < 100; i++) { 3668 mdic = CSR_READ(sc, WMREG_MDIC); 3669 if (mdic & MDIC_READY) 3670 break; 3671 delay(10); 3672 } 3673 3674 if ((mdic & MDIC_READY) == 0) { 3675 printf("%s: MDIC read timed out: phy %d reg %d\n", 3676 sc->sc_dev.dv_xname, phy, reg); 3677 rv = 0; 3678 } else if (mdic & MDIC_E) { 3679 #if 0 /* This is normal if no PHY is present. */ 3680 printf("%s: MDIC read error: phy %d reg %d\n", 3681 sc->sc_dev.dv_xname, phy, reg); 3682 #endif 3683 rv = 0; 3684 } else { 3685 rv = MDIC_DATA(mdic); 3686 if (rv == 0xffff) 3687 rv = 0; 3688 } 3689 3690 return (rv); 3691 } 3692 3693 /* 3694 * wm_gmii_i82544_writereg: [mii interface function] 3695 * 3696 * Write a PHY register on the GMII. 3697 */ 3698 static void 3699 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val) 3700 { 3701 struct wm_softc *sc = (void *) self; 3702 uint32_t mdic = 0; 3703 int i; 3704 3705 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | 3706 MDIC_REGADD(reg) | MDIC_DATA(val)); 3707 3708 for (i = 0; i < 100; i++) { 3709 mdic = CSR_READ(sc, WMREG_MDIC); 3710 if (mdic & MDIC_READY) 3711 break; 3712 delay(10); 3713 } 3714 3715 if ((mdic & MDIC_READY) == 0) 3716 printf("%s: MDIC write timed out: phy %d reg %d\n", 3717 sc->sc_dev.dv_xname, phy, reg); 3718 else if (mdic & MDIC_E) 3719 printf("%s: MDIC write error: phy %d reg %d\n", 3720 sc->sc_dev.dv_xname, phy, reg); 3721 } 3722 3723 /* 3724 * wm_gmii_statchg: [mii interface function] 3725 * 3726 * Callback from MII layer when media changes. 3727 */ 3728 static void 3729 wm_gmii_statchg(struct device *self) 3730 { 3731 struct wm_softc *sc = (void *) self; 3732 struct mii_data *mii = &sc->sc_mii; 3733 3734 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 3735 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3736 sc->sc_fcrtl &= ~FCRTL_XONE; 3737 3738 /* 3739 * Get flow control negotiation result. 3740 */ 3741 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 3742 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 3743 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 3744 mii->mii_media_active &= ~IFM_ETH_FMASK; 3745 } 3746 3747 if (sc->sc_flowflags & IFM_FLOW) { 3748 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { 3749 sc->sc_ctrl |= CTRL_TFCE; 3750 sc->sc_fcrtl |= FCRTL_XONE; 3751 } 3752 if (sc->sc_flowflags & IFM_ETH_RXPAUSE) 3753 sc->sc_ctrl |= CTRL_RFCE; 3754 } 3755 3756 if (sc->sc_mii.mii_media_active & IFM_FDX) { 3757 DPRINTF(WM_DEBUG_LINK, 3758 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname)); 3759 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3760 } else { 3761 DPRINTF(WM_DEBUG_LINK, 3762 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname)); 3763 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3764 } 3765 3766 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3767 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3768 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL 3769 : WMREG_FCRTL, sc->sc_fcrtl); 3770 } 3771