/*	$NetBSD: if_wm.c,v 1.123 2006/06/20 01:16:23 jmcneill Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.123 2006/06/20 01:16:23 jmcneill Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
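
/*
 * Illustrative note (not from the original source): because every ring
 * size above is a power of two, the WM_NEXT*()/WM_PREVRX() macros can
 * wrap with a simple AND instead of a modulo.  For example, with the
 * 256-entry Rx ring:
 *
 *	WM_NEXTRX(255) == (255 + 1) & 255 == 0
 *	WM_PREVRX(0)   == (0 - 1) & 255   == 255
 */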

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown	= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */
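
	/*
	 * Illustrative note (not from the original source): sc_txsnext
	 * and sc_txsdirty act as producer and consumer indices into
	 * sc_txsoft[]: wm_start() claims jobs at sc_txsnext, wm_txintr()
	 * retires them starting at sc_txsdirty, and sc_txsfree counts
	 * the slots still available between the two.
	 */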

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	struct callout sc_txfifo_ch;	/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x080	/* I/O handle is valid */
#define	WM_F_BUS64		0x100	/* bus is 64-bit */
#define	WM_F_PCIX		0x200	/* bus is PCI-X */
#define	WM_F_CSA		0x400	/* bus is CSA */
#define	WM_F_PCIE		0x800	/* bus is PCI-Express */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
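
/*
 * Illustrative example (not from the original source): on a 64-bit
 * platform, a control-data address of 0x0000000123456780 splits into
 * WM_CDTXADDR_LO() == 0x23456780 and WM_CDTXADDR_HI() == 0x1, matching
 * the chip's split low/high descriptor base-address registers; on a
 * 32-bit bus_addr_t build the _HI half folds to the constant 0.
 */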

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
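
/*
 * Illustrative note (not from the original source): with
 * sc_align_tweak == 2, the 14-byte Ethernet header starts at buffer
 * offset 2, so the IP header that follows begins at offset 16, a
 * 4-byte boundary.  With sc_align_tweak == 0 the IP header lands at
 * offset 14 and the upper layers must cope with the misalignment.
 */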

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_eeprom_semaphore(struct wm_softc *);
static void	wm_put_eeprom_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}
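
/*
 * Illustrative note (not from the original source): wm_match() and
 * wm_attach() each call wm_lookup() themselves because autoconf(9)
 * carries no state from match to attach; the table walk is cheap and
 * runs once per candidate device.
 */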

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
935 */ 936 sc->sc_flags |= WM_F_CSA; 937 sc->sc_bus_speed = 66; 938 aprint_verbose("%s: Communication Streaming Architecture\n", 939 sc->sc_dev.dv_xname); 940 if (sc->sc_type == WM_T_82547) { 941 callout_init(&sc->sc_txfifo_ch); 942 callout_setfunc(&sc->sc_txfifo_ch, 943 wm_82547_txfifo_stall, sc); 944 aprint_verbose("%s: using 82547 Tx FIFO stall " 945 "work-around\n", sc->sc_dev.dv_xname); 946 } 947 } else if (sc->sc_type >= WM_T_82571) { 948 sc->sc_flags |= WM_F_PCIE | WM_F_EEPROM_SEMAPHORE; 949 aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname); 950 } else { 951 reg = CSR_READ(sc, WMREG_STATUS); 952 if (reg & STATUS_BUS64) 953 sc->sc_flags |= WM_F_BUS64; 954 if (sc->sc_type >= WM_T_82544 && 955 (reg & STATUS_PCIX_MODE) != 0) { 956 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 957 958 sc->sc_flags |= WM_F_PCIX; 959 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 960 PCI_CAP_PCIX, 961 &sc->sc_pcix_offset, NULL) == 0) 962 aprint_error("%s: unable to find PCIX " 963 "capability\n", sc->sc_dev.dv_xname); 964 else if (sc->sc_type != WM_T_82545_3 && 965 sc->sc_type != WM_T_82546_3) { 966 /* 967 * Work around a problem caused by the BIOS 968 * setting the max memory read byte count 969 * incorrectly. 970 */ 971 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 972 sc->sc_pcix_offset + PCI_PCIX_CMD); 973 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 974 sc->sc_pcix_offset + PCI_PCIX_STATUS); 975 976 bytecnt = 977 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >> 978 PCI_PCIX_CMD_BYTECNT_SHIFT; 979 maxb = 980 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >> 981 PCI_PCIX_STATUS_MAXB_SHIFT; 982 if (bytecnt > maxb) { 983 aprint_verbose("%s: resetting PCI-X " 984 "MMRBC: %d -> %d\n", 985 sc->sc_dev.dv_xname, 986 512 << bytecnt, 512 << maxb); 987 pcix_cmd = (pcix_cmd & 988 ~PCI_PCIX_CMD_BYTECNT_MASK) | 989 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT); 990 pci_conf_write(pa->pa_pc, pa->pa_tag, 991 sc->sc_pcix_offset + PCI_PCIX_CMD, 992 pcix_cmd); 993 } 994 } 995 } 996 /* 997 * The quad port adapter is special; it has a PCIX-PCIX 998 * bridge on the board, and can run the secondary bus at 999 * a higher speed. 1000 */ 1001 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 1002 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 1003 : 66; 1004 } else if (sc->sc_flags & WM_F_PCIX) { 1005 switch (reg & STATUS_PCIXSPD_MASK) { 1006 case STATUS_PCIXSPD_50_66: 1007 sc->sc_bus_speed = 66; 1008 break; 1009 case STATUS_PCIXSPD_66_100: 1010 sc->sc_bus_speed = 100; 1011 break; 1012 case STATUS_PCIXSPD_100_133: 1013 sc->sc_bus_speed = 133; 1014 break; 1015 default: 1016 aprint_error( 1017 "%s: unknown PCIXSPD %d; assuming 66MHz\n", 1018 sc->sc_dev.dv_xname, 1019 reg & STATUS_PCIXSPD_MASK); 1020 sc->sc_bus_speed = 66; 1021 } 1022 } else 1023 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 1024 aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname, 1025 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 1026 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI"); 1027 } 1028 1029 /* 1030 * Allocate the control data structures, and create and load the 1031 * DMA map for it. 1032 * 1033 * NOTE: All Tx descriptors must be in the same 4G segment of 1034 * memory. So must Rx descriptors. We simplify by allocating 1035 * both sets within the same 4G segment. 1036 */ 1037 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ? 1038 WM_NTXDESC_82542 : WM_NTXDESC_82544; 1039 cdata_size = sc->sc_type < WM_T_82544 ? 

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
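
	/*
	 * Illustrative note (not from the original source): the address
	 * width determined above also fixes the device size reported
	 * later: a 6-bit MicroWire part holds 1 << 6 == 64 16-bit words,
	 * an 8-bit part 256 words, and a 16-bit SPI part 65536 words.
	 */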

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this
	 * for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, unless one was
	 * found first in the device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));
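
	/*
	 * Illustrative example (not from the original source): on port 1
	 * of a dual-port 82546, a stored address of 00:0e:0c:12:34:56
	 * becomes 00:0e:0c:12:34:57; only the low bit of the final octet
	 * is flipped.
	 */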

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif
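
	/*
	 * Illustrative note (not from the original source): the SWDPIN
	 * word packs the software-definable pin direction and level bits
	 * into separate fields; each shift-and-mask pair above relocates
	 * one 4-bit group into its CTRL register position, while the
	 * disabled block would map the remaining bits to CTRL_EXT.
	 */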
1307 */ 1308 if (sc->sc_type < WM_T_82543 || 1309 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 1310 if (wmp->wmp_flags & WMP_F_1000T) 1311 aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T " 1312 "product!\n", sc->sc_dev.dv_xname); 1313 wm_tbi_mediainit(sc); 1314 } else { 1315 if (wmp->wmp_flags & WMP_F_1000X) 1316 aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X " 1317 "product!\n", sc->sc_dev.dv_xname); 1318 wm_gmii_mediainit(sc); 1319 } 1320 1321 ifp = &sc->sc_ethercom.ec_if; 1322 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 1323 ifp->if_softc = sc; 1324 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1325 ifp->if_ioctl = wm_ioctl; 1326 ifp->if_start = wm_start; 1327 ifp->if_watchdog = wm_watchdog; 1328 ifp->if_init = wm_init; 1329 ifp->if_stop = wm_stop; 1330 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 1331 IFQ_SET_READY(&ifp->if_snd); 1332 1333 if (sc->sc_type != WM_T_82573) 1334 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1335 1336 /* 1337 * If we're a i82543 or greater, we can support VLANs. 1338 */ 1339 if (sc->sc_type >= WM_T_82543) 1340 sc->sc_ethercom.ec_capabilities |= 1341 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */; 1342 1343 /* 1344 * We can perform TCPv4 and UDPv4 checkums in-bound. Only 1345 * on i82543 and later. 1346 */ 1347 if (sc->sc_type >= WM_T_82543) 1348 ifp->if_capabilities |= 1349 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 1350 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 1351 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 1352 IFCAP_CSUM_TCPv6_Tx | 1353 IFCAP_CSUM_UDPv6_Tx; 1354 1355 /* 1356 * If we're a i82544 or greater (except i82547), we can do 1357 * TCP segmentation offload. 1358 */ 1359 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) 1360 ifp->if_capabilities |= IFCAP_TSOv4; 1361 1362 /* 1363 * Attach the interface. 1364 */ 1365 if_attach(ifp); 1366 ether_ifattach(ifp, enaddr); 1367 #if NRND > 0 1368 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname, 1369 RND_TYPE_NET, 0); 1370 #endif 1371 1372 #ifdef WM_EVENT_COUNTERS 1373 /* Attach event counters. 

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	sc->sc_powerhook = powerhook_establish(wm_powerhook, sc);
	if (sc->sc_powerhook == NULL)
		aprint_error("%s: can't establish powerhook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

static void
wm_powerhook(int why, void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;

	switch (why) {
	case PWR_SOFTSUSPEND:
		wm_shutdown(sc);
		break;
	case PWR_SOFTRESUME:
		ifp->if_flags &= ~IFF_RUNNING;
		wm_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			wm_start(ifp);
		break;
	case PWR_SUSPEND:
		pci_conf_capture(pc, tag, &sc->sc_pciconf);
		break;
	case PWR_RESUME:
		pci_conf_restore(pc, tag, &sc->sc_pciconf);
		break;
	}

	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) {
		int hlen = offset + iphl;
		WM_EVCNT_INCR(&sc->sc_ev_txtso);
		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
1576 */ 1577 struct ip ip; 1578 struct tcphdr th; 1579 1580 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 1581 1582 m_copydata(m0, offset, sizeof(ip), &ip); 1583 m_copydata(m0, hlen, sizeof(th), &th); 1584 1585 ip.ip_len = 0; 1586 1587 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 1588 sizeof(ip.ip_len), &ip.ip_len); 1589 1590 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 1591 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 1592 1593 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 1594 sizeof(th.th_sum), &th.th_sum); 1595 1596 hlen += th.th_off << 2; 1597 } else { 1598 /* 1599 * TCP/IP headers are in the first mbuf; we can do 1600 * this the easy way. 1601 */ 1602 struct ip *ip = 1603 (struct ip *) (mtod(m0, caddr_t) + offset); 1604 struct tcphdr *th = 1605 (struct tcphdr *) (mtod(m0, caddr_t) + hlen); 1606 1607 ip->ip_len = 0; 1608 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1609 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1610 1611 hlen += th->th_off << 2; 1612 } 1613 1614 cmd |= WTX_TCPIP_CMD_TSE; 1615 cmdlen |= WTX_TCPIP_CMD_TSE | WTX_TCPIP_CMD_IP | 1616 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1617 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1618 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1619 } 1620 1621 /* 1622 * NOTE: Even if we're not using the IP or TCP/UDP checksum 1623 * offload feature, if we load the context descriptor, we 1624 * MUST provide valid values for IPCSS and TUCSS fields. 1625 */ 1626 1627 ipcs = WTX_TCPIP_IPCSS(offset) | 1628 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1629 WTX_TCPIP_IPCSE(offset + iphl - 1); 1630 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1631 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1632 fields |= WTX_IXSM; 1633 } 1634 1635 offset += iphl; 1636 1637 if (m0->m_pkthdr.csum_flags & 1638 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1639 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1640 fields |= WTX_TXSM; 1641 tucs = WTX_TCPIP_TUCSS(offset) | 1642 WTX_TCPIP_TUCSO(offset + 1643 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1644 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1645 } else if ((m0->m_pkthdr.csum_flags & 1646 (M_CSUM_TCPv6|M_CSUM_UDPv6)) != 0) { 1647 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1648 fields |= WTX_TXSM; 1649 tucs = WTX_TCPIP_TUCSS(offset) | 1650 WTX_TCPIP_TUCSO(offset + 1651 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1652 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1653 } else { 1654 /* Just initialize it to a valid TCP context. */ 1655 tucs = WTX_TCPIP_TUCSS(offset) | 1656 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1657 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1658 } 1659 1660 /* Fill in the context descriptor. */ 1661 t = (struct livengood_tcpip_ctxdesc *) 1662 &sc->sc_txdescs[sc->sc_txnext]; 1663 t->tcpip_ipcs = htole32(ipcs); 1664 t->tcpip_tucs = htole32(tucs); 1665 t->tcpip_cmdlen = htole32(cmdlen); 1666 t->tcpip_seg = htole32(seg); 1667 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 1668 1669 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 1670 txs->txs_ndesc++; 1671 1672 *cmdp = cmd; 1673 *fieldsp = fields; 1674 1675 return (0); 1676 } 1677 1678 static void 1679 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 1680 { 1681 struct mbuf *m; 1682 int i; 1683 1684 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname); 1685 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 1686 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 1687 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname, 1688 m->m_data, m->m_len, m->m_flags); 1689 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname, 1690 i, i == 1 ? 
"" : "s"); 1691 } 1692 1693 /* 1694 * wm_82547_txfifo_stall: 1695 * 1696 * Callout used to wait for the 82547 Tx FIFO to drain, 1697 * reset the FIFO pointers, and restart packet transmission. 1698 */ 1699 static void 1700 wm_82547_txfifo_stall(void *arg) 1701 { 1702 struct wm_softc *sc = arg; 1703 int s; 1704 1705 s = splnet(); 1706 1707 if (sc->sc_txfifo_stall) { 1708 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) && 1709 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 1710 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 1711 /* 1712 * Packets have drained. Stop transmitter, reset 1713 * FIFO pointers, restart transmitter, and kick 1714 * the packet queue. 1715 */ 1716 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 1717 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 1718 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr); 1719 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr); 1720 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr); 1721 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr); 1722 CSR_WRITE(sc, WMREG_TCTL, tctl); 1723 CSR_WRITE_FLUSH(sc); 1724 1725 sc->sc_txfifo_head = 0; 1726 sc->sc_txfifo_stall = 0; 1727 wm_start(&sc->sc_ethercom.ec_if); 1728 } else { 1729 /* 1730 * Still waiting for packets to drain; try again in 1731 * another tick. 1732 */ 1733 callout_schedule(&sc->sc_txfifo_ch, 1); 1734 } 1735 } 1736 1737 splx(s); 1738 } 1739 1740 /* 1741 * wm_82547_txfifo_bugchk: 1742 * 1743 * Check for bug condition in the 82547 Tx FIFO. We need to 1744 * prevent enqueueing a packet that would wrap around the end 1745 * if the Tx FIFO ring buffer, otherwise the chip will croak. 1746 * 1747 * We do this by checking the amount of space before the end 1748 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 1749 * the Tx FIFO, wait for all remaining packets to drain, reset 1750 * the internal FIFO pointers to the beginning, and restart 1751 * transmission on the interface. 1752 */ 1753 #define WM_FIFO_HDR 0x10 1754 #define WM_82547_PAD_LEN 0x3e0 1755 static int 1756 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 1757 { 1758 int space = sc->sc_txfifo_size - sc->sc_txfifo_head; 1759 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 1760 1761 /* Just return if already stalled. */ 1762 if (sc->sc_txfifo_stall) 1763 return (1); 1764 1765 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1766 /* Stall only occurs in half-duplex mode. */ 1767 goto send_packet; 1768 } 1769 1770 if (len >= WM_82547_PAD_LEN + space) { 1771 sc->sc_txfifo_stall = 1; 1772 callout_schedule(&sc->sc_txfifo_ch, 1); 1773 return (1); 1774 } 1775 1776 send_packet: 1777 sc->sc_txfifo_head += len; 1778 if (sc->sc_txfifo_head >= sc->sc_txfifo_size) 1779 sc->sc_txfifo_head -= sc->sc_txfifo_size; 1780 1781 return (0); 1782 } 1783 1784 /* 1785 * wm_start: [ifnet interface function] 1786 * 1787 * Start packet transmission on the interface. 1788 */ 1789 static void 1790 wm_start(struct ifnet *ifp) 1791 { 1792 struct wm_softc *sc = ifp->if_softc; 1793 struct mbuf *m0; 1794 #if 0 /* XXXJRT */ 1795 struct m_tag *mtag; 1796 #endif 1797 struct wm_txsoft *txs; 1798 bus_dmamap_t dmamap; 1799 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; 1800 bus_addr_t curaddr; 1801 bus_size_t seglen, curlen; 1802 uint32_t cksumcmd; 1803 uint8_t cksumfields; 1804 1805 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1806 return; 1807 1808 /* 1809 * Remember the previous number of free descriptors. 
1810 */ 1811 ofree = sc->sc_txfree; 1812 1813 /* 1814 * Loop through the send queue, setting up transmit descriptors 1815 * until we drain the queue, or use up all available transmit 1816 * descriptors. 1817 */ 1818 for (;;) { 1819 /* Grab a packet off the queue. */ 1820 IFQ_POLL(&ifp->if_snd, m0); 1821 if (m0 == NULL) 1822 break; 1823 1824 DPRINTF(WM_DEBUG_TX, 1825 ("%s: TX: have packet to transmit: %p\n", 1826 sc->sc_dev.dv_xname, m0)); 1827 1828 /* Get a work queue entry. */ 1829 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) { 1830 wm_txintr(sc); 1831 if (sc->sc_txsfree == 0) { 1832 DPRINTF(WM_DEBUG_TX, 1833 ("%s: TX: no free job descriptors\n", 1834 sc->sc_dev.dv_xname)); 1835 WM_EVCNT_INCR(&sc->sc_ev_txsstall); 1836 break; 1837 } 1838 } 1839 1840 txs = &sc->sc_txsoft[sc->sc_txsnext]; 1841 dmamap = txs->txs_dmamap; 1842 1843 use_tso = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 1844 1845 /* 1846 * So says the Linux driver: 1847 * The controller does a simple calculation to make sure 1848 * there is enough room in the FIFO before initiating the 1849 * DMA for each buffer. The calc is: 1850 * 4 = ceil(buffer len / MSS) 1851 * To make sure we don't overrun the FIFO, adjust the max 1852 * buffer len if the MSS drops. 1853 */ 1854 dmamap->dm_maxsegsz = 1855 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN) 1856 ? m0->m_pkthdr.segsz << 2 1857 : WTX_MAX_LEN; 1858 1859 /* 1860 * Load the DMA map. If this fails, the packet either 1861 * didn't fit in the allotted number of segments, or we 1862 * were short on resources. For the too-many-segments 1863 * case, we simply report an error and drop the packet, 1864 * since we can't sanely copy a jumbo packet to a single 1865 * buffer. 1866 */ 1867 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 1868 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1869 if (error) { 1870 if (error == EFBIG) { 1871 WM_EVCNT_INCR(&sc->sc_ev_txdrop); 1872 log(LOG_ERR, "%s: Tx packet consumes too many " 1873 "DMA segments, dropping...\n", 1874 sc->sc_dev.dv_xname); 1875 IFQ_DEQUEUE(&ifp->if_snd, m0); 1876 wm_dump_mbuf_chain(sc, m0); 1877 m_freem(m0); 1878 continue; 1879 } 1880 /* 1881 * Short on resources, just stop for now. 1882 */ 1883 DPRINTF(WM_DEBUG_TX, 1884 ("%s: TX: dmamap load failed: %d\n", 1885 sc->sc_dev.dv_xname, error)); 1886 break; 1887 } 1888 1889 segs_needed = dmamap->dm_nsegs; 1890 if (use_tso) { 1891 /* For sentinel descriptor; see below. */ 1892 segs_needed++; 1893 } 1894 1895 /* 1896 * Ensure we have enough descriptors free to describe 1897 * the packet. Note, we always reserve one descriptor 1898 * at the end of the ring due to the semantics of the 1899 * TDT register, plus one more in the event we need 1900 * to load offload context. 1901 */ 1902 if (segs_needed > sc->sc_txfree - 2) { 1903 /* 1904 * Not enough free descriptors to transmit this 1905 * packet. We haven't committed anything yet, 1906 * so just unload the DMA map, put the packet 1907 * back on the queue, and punt. Notify the upper 1908 * layer that there are no more slots left. 1909 */ 1910 DPRINTF(WM_DEBUG_TX, 1911 ("%s: TX: need %d (%d) descriptors, have %d\n", 1912 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed, 1913 sc->sc_txfree - 1)); 1914 ifp->if_flags |= IFF_OACTIVE; 1915 bus_dmamap_unload(sc->sc_dmat, dmamap); 1916 WM_EVCNT_INCR(&sc->sc_ev_txdstall); 1917 break; 1918 } 1919 1920 /* 1921 * Check for 82547 Tx FIFO bug. We need to do this 1922 * once we know we can transmit the packet, since we 1923 * do some internal FIFO space accounting here.
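 * wm_82547_txfifo_bugchk() charges the padded packet length against
 * sc_txfifo_head; if the packet would wrap the end of the FIFO it
 * flags a stall, and wm_82547_txfifo_stall() later resets the FIFO
 * pointers and restarts transmission once the chip has drained.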
1924 */ 1925 if (sc->sc_type == WM_T_82547 && 1926 wm_82547_txfifo_bugchk(sc, m0)) { 1927 DPRINTF(WM_DEBUG_TX, 1928 ("%s: TX: 82547 Tx FIFO bug detected\n", 1929 sc->sc_dev.dv_xname)); 1930 ifp->if_flags |= IFF_OACTIVE; 1931 bus_dmamap_unload(sc->sc_dmat, dmamap); 1932 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall); 1933 break; 1934 } 1935 1936 IFQ_DEQUEUE(&ifp->if_snd, m0); 1937 1938 /* 1939 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1940 */ 1941 1942 DPRINTF(WM_DEBUG_TX, 1943 ("%s: TX: packet has %d (%d) DMA segments\n", 1944 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed)); 1945 1946 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 1947 1948 /* 1949 * Store a pointer to the packet so that we can free it 1950 * later. 1951 * 1952 * Initially, we consider the number of descriptors the 1953 * packet uses to be the number of DMA segments. This may be 1954 * incremented by 1 if we do checksum offload (a descriptor 1955 * is used to set the checksum context). 1956 */ 1957 txs->txs_mbuf = m0; 1958 txs->txs_firstdesc = sc->sc_txnext; 1959 txs->txs_ndesc = segs_needed; 1960 1961 /* Set up offload parameters for this packet. */ 1962 if (m0->m_pkthdr.csum_flags & 1963 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 1964 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 1965 if (wm_tx_offload(sc, txs, &cksumcmd, 1966 &cksumfields) != 0) { 1967 /* Error message already displayed. */ 1968 bus_dmamap_unload(sc->sc_dmat, dmamap); 1969 continue; 1970 } 1971 } else { 1972 cksumcmd = 0; 1973 cksumfields = 0; 1974 } 1975 1976 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 1977 1978 /* Sync the DMA map. */ 1979 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 1980 BUS_DMASYNC_PREWRITE); 1981 1982 /* 1983 * Initialize the transmit descriptor. 1984 */ 1985 for (nexttx = sc->sc_txnext, seg = 0; 1986 seg < dmamap->dm_nsegs; seg++) { 1987 for (seglen = dmamap->dm_segs[seg].ds_len, 1988 curaddr = dmamap->dm_segs[seg].ds_addr; 1989 seglen != 0; 1990 curaddr += curlen, seglen -= curlen, 1991 nexttx = WM_NEXTTX(sc, nexttx)) { 1992 curlen = seglen; 1993 1994 /* 1995 * So says the Linux driver: 1996 * Work around for premature descriptor 1997 * write-backs in TSO mode. Append a 1998 * 4-byte sentinel descriptor. 1999 */ 2000 if (use_tso && 2001 seg == dmamap->dm_nsegs - 1 && 2002 curlen > 8) 2003 curlen -= 4; 2004 2005 wm_set_dma_addr( 2006 &sc->sc_txdescs[nexttx].wtx_addr, 2007 curaddr); 2008 sc->sc_txdescs[nexttx].wtx_cmdlen = 2009 htole32(cksumcmd | curlen); 2010 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2011 0; 2012 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2013 cksumfields; 2014 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2015 lasttx = nexttx; 2016 2017 DPRINTF(WM_DEBUG_TX, 2018 ("%s: TX: desc %d: low 0x%08lx, " 2019 "len 0x%04x\n", 2020 sc->sc_dev.dv_xname, nexttx, 2021 curaddr & 0xffffffffUL, (unsigned)curlen)); 2022 } 2023 } 2024 2025 KASSERT(lasttx != -1); 2026 2027 /* 2028 * Set up the command byte on the last descriptor of 2029 * the packet. If we're in the interrupt delay window, 2030 * delay the interrupt. 2031 */ 2032 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2033 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2034 2035 #if 0 /* XXXJRT */ 2036 /* 2037 * If VLANs are enabled and the packet has a VLAN tag, set 2038 * up the descriptor to encapsulate the packet for us. 2039 * 2040 * This is only valid on the last descriptor of the packet.
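 * (Setting WTX_CMD_VLE on the EOP descriptor tells the chip to
 * insert the 802.1Q tag from the wtxu_vlan field on the wire.)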
2041 */ 2042 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2043 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2044 htole32(WTX_CMD_VLE); 2045 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2046 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2047 } 2048 #endif /* XXXJRT */ 2049 2050 txs->txs_lastdesc = lasttx; 2051 2052 DPRINTF(WM_DEBUG_TX, 2053 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname, 2054 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2055 2056 /* Sync the descriptors we're using. */ 2057 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2058 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2059 2060 /* Give the packet to the chip. */ 2061 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2062 2063 DPRINTF(WM_DEBUG_TX, 2064 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx)); 2065 2066 DPRINTF(WM_DEBUG_TX, 2067 ("%s: TX: finished transmitting packet, job %d\n", 2068 sc->sc_dev.dv_xname, sc->sc_txsnext)); 2069 2070 /* Advance the tx pointer. */ 2071 sc->sc_txfree -= txs->txs_ndesc; 2072 sc->sc_txnext = nexttx; 2073 2074 sc->sc_txsfree--; 2075 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2076 2077 #if NBPFILTER > 0 2078 /* Pass the packet to any BPF listeners. */ 2079 if (ifp->if_bpf) 2080 bpf_mtap(ifp->if_bpf, m0); 2081 #endif /* NBPFILTER > 0 */ 2082 } 2083 2084 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2085 /* No more slots; notify upper layer. */ 2086 ifp->if_flags |= IFF_OACTIVE; 2087 } 2088 2089 if (sc->sc_txfree != ofree) { 2090 /* Set a watchdog timer in case the chip flakes out. */ 2091 ifp->if_timer = 5; 2092 } 2093 } 2094 2095 /* 2096 * wm_watchdog: [ifnet interface function] 2097 * 2098 * Watchdog timer handler. 2099 */ 2100 static void 2101 wm_watchdog(struct ifnet *ifp) 2102 { 2103 struct wm_softc *sc = ifp->if_softc; 2104 2105 /* 2106 * Since we're using delayed interrupts, sweep up 2107 * before we report an error. 2108 */ 2109 wm_txintr(sc); 2110 2111 if (sc->sc_txfree != WM_NTXDESC(sc)) { 2112 log(LOG_ERR, 2113 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2114 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 2115 sc->sc_txnext); 2116 ifp->if_oerrors++; 2117 2118 /* Reset the interface. */ 2119 (void) wm_init(ifp); 2120 } 2121 2122 /* Try to get more packets going. */ 2123 wm_start(ifp); 2124 } 2125 2126 /* 2127 * wm_ioctl: [ifnet interface function] 2128 * 2129 * Handle control requests from the operator. 2130 */ 2131 static int 2132 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2133 { 2134 struct wm_softc *sc = ifp->if_softc; 2135 struct ifreq *ifr = (struct ifreq *) data; 2136 int s, error; 2137 2138 s = splnet(); 2139 2140 switch (cmd) { 2141 case SIOCSIFMEDIA: 2142 case SIOCGIFMEDIA: 2143 /* Flow control requires full-duplex mode. */ 2144 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 2145 (ifr->ifr_media & IFM_FDX) == 0) 2146 ifr->ifr_media &= ~IFM_ETH_FMASK; 2147 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 2148 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 2149 /* We can do both TXPAUSE and RXPAUSE. */ 2150 ifr->ifr_media |= 2151 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 2152 } 2153 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 2154 } 2155 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 2156 break; 2157 default: 2158 error = ether_ioctl(ifp, cmd, data); 2159 if (error == ENETRESET) { 2160 /* 2161 * Multicast list has changed; set the hardware filter 2162 * accordingly. 
2163 */ 2164 if (ifp->if_flags & IFF_RUNNING) 2165 wm_set_filter(sc); 2166 error = 0; 2167 } 2168 break; 2169 } 2170 2171 /* Try to get more packets going. */ 2172 wm_start(ifp); 2173 2174 splx(s); 2175 return (error); 2176 } 2177 2178 /* 2179 * wm_intr: 2180 * 2181 * Interrupt service routine. 2182 */ 2183 static int 2184 wm_intr(void *arg) 2185 { 2186 struct wm_softc *sc = arg; 2187 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2188 uint32_t icr; 2189 int handled = 0; 2190 2191 while (1 /* CONSTCOND */) { 2192 icr = CSR_READ(sc, WMREG_ICR); 2193 if ((icr & sc->sc_icr) == 0) 2194 break; 2195 2196 #if 0 /*NRND > 0*/ 2197 if (RND_ENABLED(&sc->rnd_source)) 2198 rnd_add_uint32(&sc->rnd_source, icr); 2199 #endif 2200 2201 handled = 1; 2202 2203 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2204 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 2205 DPRINTF(WM_DEBUG_RX, 2206 ("%s: RX: got Rx intr 0x%08x\n", 2207 sc->sc_dev.dv_xname, 2208 icr & (ICR_RXDMT0|ICR_RXT0))); 2209 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 2210 } 2211 #endif 2212 wm_rxintr(sc); 2213 2214 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2215 if (icr & ICR_TXDW) { 2216 DPRINTF(WM_DEBUG_TX, 2217 ("%s: TX: got TXDW interrupt\n", 2218 sc->sc_dev.dv_xname)); 2219 WM_EVCNT_INCR(&sc->sc_ev_txdw); 2220 } 2221 #endif 2222 wm_txintr(sc); 2223 2224 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 2225 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 2226 wm_linkintr(sc, icr); 2227 } 2228 2229 if (icr & ICR_RXO) { 2230 ifp->if_ierrors++; 2231 #if defined(WM_DEBUG) 2232 log(LOG_WARNING, "%s: Receive overrun\n", 2233 sc->sc_dev.dv_xname); 2234 #endif /* defined(WM_DEBUG) */ 2235 } 2236 } 2237 2238 if (handled) { 2239 /* Try to get more packets going. */ 2240 wm_start(ifp); 2241 } 2242 2243 return (handled); 2244 } 2245 2246 /* 2247 * wm_txintr: 2248 * 2249 * Helper; handle transmit interrupts. 2250 */ 2251 static void 2252 wm_txintr(struct wm_softc *sc) 2253 { 2254 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2255 struct wm_txsoft *txs; 2256 uint8_t status; 2257 int i; 2258 2259 ifp->if_flags &= ~IFF_OACTIVE; 2260 2261 /* 2262 * Go through the Tx list and free mbufs for those 2263 * frames which have been transmitted. 2264 */ 2265 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2266 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2267 txs = &sc->sc_txsoft[i]; 2268 2269 DPRINTF(WM_DEBUG_TX, 2270 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i)); 2271 2272 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2273 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2274 2275 status = 2276 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2277 if ((status & WTX_ST_DD) == 0) { 2278 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2279 BUS_DMASYNC_PREREAD); 2280 break; 2281 } 2282 2283 DPRINTF(WM_DEBUG_TX, 2284 ("%s: TX: job %d done: descs %d..%d\n", 2285 sc->sc_dev.dv_xname, i, txs->txs_firstdesc, 2286 txs->txs_lastdesc)); 2287 2288 /* 2289 * XXX We should probably be using the statistics 2290 * XXX registers, but I don't know if they exist 2291 * XXX on chips before the i82544. 
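 * XXX In the meantime, if_opackets and the collision counts are
 * XXX derived from the per-descriptor WTX_ST_EC/WTX_ST_LC status
 * XXX bits below.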
2292 */ 2293 2294 #ifdef WM_EVENT_COUNTERS 2295 if (status & WTX_ST_TU) 2296 WM_EVCNT_INCR(&sc->sc_ev_tu); 2297 #endif /* WM_EVENT_COUNTERS */ 2298 2299 if (status & (WTX_ST_EC|WTX_ST_LC)) { 2300 ifp->if_oerrors++; 2301 if (status & WTX_ST_LC) 2302 log(LOG_WARNING, "%s: late collision\n", 2303 sc->sc_dev.dv_xname); 2304 else if (status & WTX_ST_EC) { 2305 ifp->if_collisions += 16; 2306 log(LOG_WARNING, "%s: excessive collisions\n", 2307 sc->sc_dev.dv_xname); 2308 } 2309 } else 2310 ifp->if_opackets++; 2311 2312 sc->sc_txfree += txs->txs_ndesc; 2313 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 2314 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2315 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2316 m_freem(txs->txs_mbuf); 2317 txs->txs_mbuf = NULL; 2318 } 2319 2320 /* Update the dirty transmit buffer pointer. */ 2321 sc->sc_txsdirty = i; 2322 DPRINTF(WM_DEBUG_TX, 2323 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i)); 2324 2325 /* 2326 * If there are no more pending transmissions, cancel the watchdog 2327 * timer. 2328 */ 2329 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 2330 ifp->if_timer = 0; 2331 } 2332 2333 /* 2334 * wm_rxintr: 2335 * 2336 * Helper; handle receive interrupts. 2337 */ 2338 static void 2339 wm_rxintr(struct wm_softc *sc) 2340 { 2341 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2342 struct wm_rxsoft *rxs; 2343 struct mbuf *m; 2344 int i, len; 2345 uint8_t status, errors; 2346 2347 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 2348 rxs = &sc->sc_rxsoft[i]; 2349 2350 DPRINTF(WM_DEBUG_RX, 2351 ("%s: RX: checking descriptor %d\n", 2352 sc->sc_dev.dv_xname, i)); 2353 2354 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2355 2356 status = sc->sc_rxdescs[i].wrx_status; 2357 errors = sc->sc_rxdescs[i].wrx_errors; 2358 len = le16toh(sc->sc_rxdescs[i].wrx_len); 2359 2360 if ((status & WRX_ST_DD) == 0) { 2361 /* 2362 * We have processed all of the receive descriptors. 2363 */ 2364 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 2365 break; 2366 } 2367 2368 if (__predict_false(sc->sc_rxdiscard)) { 2369 DPRINTF(WM_DEBUG_RX, 2370 ("%s: RX: discarding contents of descriptor %d\n", 2371 sc->sc_dev.dv_xname, i)); 2372 WM_INIT_RXDESC(sc, i); 2373 if (status & WRX_ST_EOP) { 2374 /* Reset our state. */ 2375 DPRINTF(WM_DEBUG_RX, 2376 ("%s: RX: resetting rxdiscard -> 0\n", 2377 sc->sc_dev.dv_xname)); 2378 sc->sc_rxdiscard = 0; 2379 } 2380 continue; 2381 } 2382 2383 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2384 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2385 2386 m = rxs->rxs_mbuf; 2387 2388 /* 2389 * Add a new receive buffer to the ring. 2390 */ 2391 if (wm_add_rxbuf(sc, i) != 0) { 2392 /* 2393 * Failed, throw away what we've done so 2394 * far, and discard the rest of the packet. 2395 */ 2396 ifp->if_ierrors++; 2397 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2398 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2399 WM_INIT_RXDESC(sc, i); 2400 if ((status & WRX_ST_EOP) == 0) 2401 sc->sc_rxdiscard = 1; 2402 if (sc->sc_rxhead != NULL) 2403 m_freem(sc->sc_rxhead); 2404 WM_RXCHAIN_RESET(sc); 2405 DPRINTF(WM_DEBUG_RX, 2406 ("%s: RX: Rx buffer allocation failed, " 2407 "dropping packet%s\n", sc->sc_dev.dv_xname, 2408 sc->sc_rxdiscard ? " (discard)" : "")); 2409 continue; 2410 } 2411 2412 WM_RXCHAIN_LINK(sc, m); 2413 2414 m->m_len = len; 2415 2416 DPRINTF(WM_DEBUG_RX, 2417 ("%s: RX: buffer at %p len %d\n", 2418 sc->sc_dev.dv_xname, m->m_data, len)); 2419 2420 /* 2421 * If this is not the end of the packet, keep 2422 * looking. 
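 * A packet larger than one cluster arrives as several descriptors;
 * we accumulate the pieces on the sc_rxhead chain until a
 * descriptor with WRX_ST_EOP set completes the frame.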
2423 */ 2424 if ((status & WRX_ST_EOP) == 0) { 2425 sc->sc_rxlen += len; 2426 DPRINTF(WM_DEBUG_RX, 2427 ("%s: RX: not yet EOP, rxlen -> %d\n", 2428 sc->sc_dev.dv_xname, sc->sc_rxlen)); 2429 continue; 2430 } 2431 2432 /* 2433 * Okay, we have the entire packet now. The chip is 2434 * configured to include the FCS (not all chips can 2435 * be configured to strip it), so we need to trim it. 2436 */ 2437 m->m_len -= ETHER_CRC_LEN; 2438 2439 *sc->sc_rxtailp = NULL; 2440 len = m->m_len + sc->sc_rxlen; 2441 m = sc->sc_rxhead; 2442 2443 WM_RXCHAIN_RESET(sc); 2444 2445 DPRINTF(WM_DEBUG_RX, 2446 ("%s: RX: have entire packet, len -> %d\n", 2447 sc->sc_dev.dv_xname, len)); 2448 2449 /* 2450 * If an error occurred, update stats and drop the packet. 2451 */ 2452 if (errors & 2453 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 2454 ifp->if_ierrors++; 2455 if (errors & WRX_ER_SE) 2456 log(LOG_WARNING, "%s: symbol error\n", 2457 sc->sc_dev.dv_xname); 2458 else if (errors & WRX_ER_SEQ) 2459 log(LOG_WARNING, "%s: receive sequence error\n", 2460 sc->sc_dev.dv_xname); 2461 else if (errors & WRX_ER_CE) 2462 log(LOG_WARNING, "%s: CRC error\n", 2463 sc->sc_dev.dv_xname); 2464 m_freem(m); 2465 continue; 2466 } 2467 2468 /* 2469 * No errors. Receive the packet. 2470 */ 2471 m->m_pkthdr.rcvif = ifp; 2472 m->m_pkthdr.len = len; 2473 2474 #if 0 /* XXXJRT */ 2475 /* 2476 * If VLANs are enabled, VLAN packets have been unwrapped 2477 * for us. Associate the tag with the packet. 2478 */ 2479 if ((status & WRX_ST_VP) != 0) { 2480 VLAN_INPUT_TAG(ifp, m, 2481 le16toh(sc->sc_rxdescs[i].wrx_special), 2482 continue); 2483 } 2484 #endif /* XXXJRT */ 2485 2486 /* 2487 * Set up checksum info for this packet. 2488 */ 2489 if ((status & WRX_ST_IXSM) == 0) { 2490 if (status & WRX_ST_IPCS) { 2491 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 2492 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2493 if (errors & WRX_ER_IPE) 2494 m->m_pkthdr.csum_flags |= 2495 M_CSUM_IPv4_BAD; 2496 } 2497 if (status & WRX_ST_TCPCS) { 2498 /* 2499 * Note: we don't know if this was TCP or UDP, 2500 * so we just set both bits, and expect the 2501 * upper layers to deal. 2502 */ 2503 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 2504 m->m_pkthdr.csum_flags |= 2505 M_CSUM_TCPv4|M_CSUM_UDPv4; 2506 if (errors & WRX_ER_TCPE) 2507 m->m_pkthdr.csum_flags |= 2508 M_CSUM_TCP_UDP_BAD; 2509 } 2510 } 2511 2512 ifp->if_ipackets++; 2513 2514 #if NBPFILTER > 0 2515 /* Pass this up to any BPF listeners. */ 2516 if (ifp->if_bpf) 2517 bpf_mtap(ifp->if_bpf, m); 2518 #endif /* NBPFILTER > 0 */ 2519 2520 /* Pass it on. */ 2521 (*ifp->if_input)(ifp, m); 2522 } 2523 2524 /* Update the receive pointer. */ 2525 sc->sc_rxptr = i; 2526 2527 DPRINTF(WM_DEBUG_RX, 2528 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i)); 2529 } 2530 2531 /* 2532 * wm_linkintr: 2533 * 2534 * Helper; handle link interrupts. 2535 */ 2536 static void 2537 wm_linkintr(struct wm_softc *sc, uint32_t icr) 2538 { 2539 uint32_t status; 2540 2541 /* 2542 * If we get a link status interrupt on a 1000BASE-T 2543 * device, just fall into the normal MII tick path. 2544 */ 2545 if (sc->sc_flags & WM_F_HAS_MII) { 2546 if (icr & ICR_LSC) { 2547 DPRINTF(WM_DEBUG_LINK, 2548 ("%s: LINK: LSC -> mii_tick\n", 2549 sc->sc_dev.dv_xname)); 2550 mii_tick(&sc->sc_mii); 2551 } else if (icr & ICR_RXSEQ) { 2552 DPRINTF(WM_DEBUG_LINK, 2553 ("%s: LINK: Receive sequence error\n", 2554 sc->sc_dev.dv_xname)); 2555 } 2556 return; 2557 } 2558 2559 /* 2560 * If we are now receiving /C/, check for link again in 2561 * a couple of link clock ticks.
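 * (The /C/ ordered sets carry the 802.3z auto-negotiation
 * configuration words, so seeing them means the link partner is
 * (re)negotiating.)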
2562 */ 2563 if (icr & ICR_RXCFG) { 2564 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 2565 sc->sc_dev.dv_xname)); 2566 sc->sc_tbi_anstate = 2; 2567 } 2568 2569 if (icr & ICR_LSC) { 2570 status = CSR_READ(sc, WMREG_STATUS); 2571 if (status & STATUS_LU) { 2572 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 2573 sc->sc_dev.dv_xname, 2574 (status & STATUS_FD) ? "FDX" : "HDX")); 2575 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 2576 sc->sc_fcrtl &= ~FCRTL_XONE; 2577 if (status & STATUS_FD) 2578 sc->sc_tctl |= 2579 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2580 else 2581 sc->sc_tctl |= 2582 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 2583 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 2584 sc->sc_fcrtl |= FCRTL_XONE; 2585 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2586 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 2587 WMREG_OLD_FCRTL : WMREG_FCRTL, 2588 sc->sc_fcrtl); 2589 sc->sc_tbi_linkup = 1; 2590 } else { 2591 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 2592 sc->sc_dev.dv_xname)); 2593 sc->sc_tbi_linkup = 0; 2594 } 2595 sc->sc_tbi_anstate = 2; 2596 wm_tbi_set_linkled(sc); 2597 } else if (icr & ICR_RXSEQ) { 2598 DPRINTF(WM_DEBUG_LINK, 2599 ("%s: LINK: Receive sequence error\n", 2600 sc->sc_dev.dv_xname)); 2601 } 2602 } 2603 2604 /* 2605 * wm_tick: 2606 * 2607 * One second timer, used to check link status, sweep up 2608 * completed transmit jobs, etc. 2609 */ 2610 static void 2611 wm_tick(void *arg) 2612 { 2613 struct wm_softc *sc = arg; 2614 int s; 2615 2616 s = splnet(); 2617 2618 if (sc->sc_type >= WM_T_82542_2_1) { 2619 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2620 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2621 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2622 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2623 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2624 } 2625 2626 if (sc->sc_flags & WM_F_HAS_MII) 2627 mii_tick(&sc->sc_mii); 2628 else 2629 wm_tbi_check_link(sc); 2630 2631 splx(s); 2632 2633 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2634 } 2635 2636 /* 2637 * wm_reset: 2638 * 2639 * Reset the i82542 chip. 2640 */ 2641 static void 2642 wm_reset(struct wm_softc *sc) 2643 { 2644 int i; 2645 2646 /* 2647 * Allocate on-chip memory according to the MTU size. 2648 * The Packet Buffer Allocation register must be written 2649 * before the chip is reset. 2650 */ 2651 switch (sc->sc_type) { 2652 case WM_T_82547: 2653 case WM_T_82547_2: 2654 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2655 PBA_22K : PBA_30K; 2656 sc->sc_txfifo_head = 0; 2657 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 2658 sc->sc_txfifo_size = 2659 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 2660 sc->sc_txfifo_stall = 0; 2661 break; 2662 case WM_T_82571: 2663 case WM_T_82572: 2664 sc->sc_pba = PBA_32K; 2665 break; 2666 case WM_T_82573: 2667 sc->sc_pba = PBA_12K; 2668 break; 2669 default: 2670 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2671 PBA_40K : PBA_48K; 2672 break; 2673 } 2674 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 2675 2676 switch (sc->sc_type) { 2677 case WM_T_82544: 2678 case WM_T_82540: 2679 case WM_T_82545: 2680 case WM_T_82546: 2681 case WM_T_82541: 2682 case WM_T_82541_2: 2683 /* 2684 * On some chipsets, a reset through a memory-mapped write 2685 * cycle can cause the chip to reset before completing the 2686 * write cycle. This causes major headaches that can be 2687 * avoided by issuing the reset via indirect register writes 2688 * through I/O space.
* 2690 * So, if we successfully mapped the I/O BAR at attach time, 2691 * use that. Otherwise, try our luck with a memory-mapped 2692 * reset. 2693 */ 2694 if (sc->sc_flags & WM_F_IOH_VALID) 2695 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 2696 else 2697 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 2698 break; 2699 2700 case WM_T_82545_3: 2701 case WM_T_82546_3: 2702 /* Use the shadow control register on these chips. */ 2703 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 2704 break; 2705 2706 default: 2707 /* Everything else can safely use the documented method. */ 2708 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 2709 break; 2710 } 2711 delay(10000); 2712 2713 for (i = 0; i < 1000; i++) { 2714 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) 2715 return; 2716 delay(20); 2717 } 2718 2719 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST) 2720 log(LOG_ERR, "%s: reset failed to complete\n", 2721 sc->sc_dev.dv_xname); 2722 } 2723 2724 /* 2725 * wm_init: [ifnet interface function] 2726 * 2727 * Initialize the interface. Must be called at splnet(). 2728 */ 2729 static int 2730 wm_init(struct ifnet *ifp) 2731 { 2732 struct wm_softc *sc = ifp->if_softc; 2733 struct wm_rxsoft *rxs; 2734 int i, error = 0; 2735 uint32_t reg; 2736 2737 /* 2738 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. 2739 * There is a small but measurable benefit to avoiding the adjustment 2740 * of the descriptor so that the headers are aligned, for normal mtu, 2741 * on such platforms. One possibility is that the DMA itself is 2742 * slightly more efficient if the front of the entire packet (instead 2743 * of the front of the headers) is aligned. 2744 * 2745 * Note we must always set align_tweak to 0 if we are using 2746 * jumbo frames. 2747 */ 2748 #ifdef __NO_STRICT_ALIGNMENT 2749 sc->sc_align_tweak = 0; 2750 #else 2751 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 2752 sc->sc_align_tweak = 0; 2753 else 2754 sc->sc_align_tweak = 2; 2755 #endif /* __NO_STRICT_ALIGNMENT */ 2756 2757 /* Cancel any pending I/O. */ 2758 wm_stop(ifp, 0); 2759 2760 /* Reset the chip to a known state. */ 2761 wm_reset(sc); 2762 2763 /* Initialize the transmit descriptor ring. */ 2764 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 2765 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 2766 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2767 sc->sc_txfree = WM_NTXDESC(sc); 2768 sc->sc_txnext = 0; 2769 2770 if (sc->sc_type < WM_T_82543) { 2771 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0)); 2772 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0)); 2773 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 2774 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 2775 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 2776 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 2777 } else { 2778 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0)); 2779 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0)); 2780 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 2781 CSR_WRITE(sc, WMREG_TDH, 0); 2782 CSR_WRITE(sc, WMREG_TDT, 0); 2783 CSR_WRITE(sc, WMREG_TIDV, 64); 2784 CSR_WRITE(sc, WMREG_TADV, 128); 2785 2786 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 2787 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 2788 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 2789 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 2790 } 2791 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 2792 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 2793 2794 /* Initialize the transmit job descriptors.
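 * These are the software-side records that pair each queued mbuf
 * chain with the range of hardware descriptors it occupies.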
*/ 2795 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 2796 sc->sc_txsoft[i].txs_mbuf = NULL; 2797 sc->sc_txsfree = WM_TXQUEUELEN(sc); 2798 sc->sc_txsnext = 0; 2799 sc->sc_txsdirty = 0; 2800 2801 /* 2802 * Initialize the receive descriptor and receive job 2803 * descriptor rings. 2804 */ 2805 if (sc->sc_type < WM_T_82543) { 2806 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 2807 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 2808 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 2809 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 2810 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 2811 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 2812 2813 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 2814 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 2815 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 2816 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 2817 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 2818 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 2819 } else { 2820 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 2821 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 2822 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 2823 CSR_WRITE(sc, WMREG_RDH, 0); 2824 CSR_WRITE(sc, WMREG_RDT, 0); 2825 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD); 2826 CSR_WRITE(sc, WMREG_RADV, 128); 2827 } 2828 for (i = 0; i < WM_NRXDESC; i++) { 2829 rxs = &sc->sc_rxsoft[i]; 2830 if (rxs->rxs_mbuf == NULL) { 2831 if ((error = wm_add_rxbuf(sc, i)) != 0) { 2832 log(LOG_ERR, "%s: unable to allocate or map rx " 2833 "buffer %d, error = %d\n", 2834 sc->sc_dev.dv_xname, i, error); 2835 /* 2836 * XXX Should attempt to run with fewer receive 2837 * XXX buffers instead of just failing. 2838 */ 2839 wm_rxdrain(sc); 2840 goto out; 2841 } 2842 } else 2843 WM_INIT_RXDESC(sc, i); 2844 } 2845 sc->sc_rxptr = 0; 2846 sc->sc_rxdiscard = 0; 2847 WM_RXCHAIN_RESET(sc); 2848 2849 /* 2850 * Clear out the VLAN table -- we don't use it (yet). 2851 */ 2852 CSR_WRITE(sc, WMREG_VET, 0); 2853 for (i = 0; i < WM_VLAN_TABSIZE; i++) 2854 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 2855 2856 /* 2857 * Set up flow-control parameters. 2858 * 2859 * XXX Values could probably stand some tuning. 2860 */ 2861 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 2862 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 2863 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 2864 2865 sc->sc_fcrtl = FCRTL_DFLT; 2866 if (sc->sc_type < WM_T_82543) { 2867 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 2868 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 2869 } else { 2870 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 2871 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 2872 } 2873 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 2874 2875 #if 0 /* XXXJRT */ 2876 /* Deal with VLAN enables. */ 2877 if (VLAN_ATTACHED(&sc->sc_ethercom)) 2878 sc->sc_ctrl |= CTRL_VME; 2879 else 2880 #endif /* XXXJRT */ 2881 sc->sc_ctrl &= ~CTRL_VME; 2882 2883 /* Write the control registers. */ 2884 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2885 #if 0 2886 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 2887 #endif 2888 2889 /* 2890 * Set up checksum offload parameters. 2891 */ 2892 reg = CSR_READ(sc, WMREG_RXCSUM); 2893 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 2894 reg |= RXCSUM_IPOFL; 2895 else 2896 reg &= ~RXCSUM_IPOFL; 2897 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 2898 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 2899 else { 2900 reg &= ~RXCSUM_TUOFL; 2901 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0) 2902 reg &= ~RXCSUM_IPOFL; 2903 } 2904 CSR_WRITE(sc, WMREG_RXCSUM, reg); 2905 2906 /* 2907 * Set up the interrupt registers. 
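 * Mask everything with IMC first, then unmask in IMS exactly the
 * events wm_intr() services: transmit done, link state change,
 * receive sequence error, descriptor-low, overrun, and the
 * receive timer.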
2908 */ 2909 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 2910 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 2911 ICR_RXO | ICR_RXT0; 2912 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 2913 sc->sc_icr |= ICR_RXCFG; 2914 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 2915 2916 /* Set up the inter-packet gap. */ 2917 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 2918 2919 if (sc->sc_type >= WM_T_82543) { 2920 /* Set up the interrupt throttling register (units of 256ns) */ 2921 sc->sc_itr = 1000000000 / (7000 * 256); 2922 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); 2923 } 2924 2925 #if 0 /* XXXJRT */ 2926 /* Set the VLAN ethernetype. */ 2927 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 2928 #endif 2929 2930 /* 2931 * Set up the transmit control register; we start out with 2932 * a collision distance suitable for FDX, but update it when 2933 * we resolve the media type. 2934 */ 2935 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) | 2936 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2937 if (sc->sc_type >= WM_T_82571) 2938 sc->sc_tctl |= TCTL_MULR; 2939 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2940 2941 /* Set the media. */ 2942 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp); 2943 2944 /* 2945 * Set up the receive control register; we actually program 2946 * the register when we set the receive filter. Use multicast 2947 * address offset type 0. 2948 * 2949 * Only the i82544 has the ability to strip the incoming 2950 * CRC, so we don't enable that feature. 2951 */ 2952 sc->sc_mchash_type = 0; 2953 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 2954 | RCTL_MO(sc->sc_mchash_type); 2955 2956 /* The 82573 doesn't support jumbo frames. */ 2957 if (sc->sc_type != WM_T_82573) 2958 sc->sc_rctl |= RCTL_LPE; 2959 2960 if (MCLBYTES == 2048) { 2961 sc->sc_rctl |= RCTL_2k; 2962 } else { 2963 if (sc->sc_type >= WM_T_82543) { 2964 switch (MCLBYTES) { 2965 case 4096: 2966 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 2967 break; 2968 case 8192: 2969 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 2970 break; 2971 case 16384: 2972 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 2973 break; 2974 default: 2975 panic("wm_init: MCLBYTES %d unsupported", 2976 MCLBYTES); 2977 break; 2978 } 2979 } else panic("wm_init: i82542 requires MCLBYTES = 2048"); 2980 } 2981 2982 /* Set the receive filter. */ 2983 wm_set_filter(sc); 2984 2985 /* Start the one second link check clock. */ 2986 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2987 2988 /* ...all done! */ 2989 ifp->if_flags |= IFF_RUNNING; 2990 ifp->if_flags &= ~IFF_OACTIVE; 2991 2992 out: 2993 if (error) 2994 log(LOG_ERR, "%s: interface not running\n", 2995 sc->sc_dev.dv_xname); 2996 return (error); 2997 } 2998 2999 /* 3000 * wm_rxdrain: 3001 * 3002 * Drain the receive queue. 3003 */ 3004 static void 3005 wm_rxdrain(struct wm_softc *sc) 3006 { 3007 struct wm_rxsoft *rxs; 3008 int i; 3009 3010 for (i = 0; i < WM_NRXDESC; i++) { 3011 rxs = &sc->sc_rxsoft[i]; 3012 if (rxs->rxs_mbuf != NULL) { 3013 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 3014 m_freem(rxs->rxs_mbuf); 3015 rxs->rxs_mbuf = NULL; 3016 } 3017 } 3018 } 3019 3020 /* 3021 * wm_stop: [ifnet interface function] 3022 * 3023 * Stop transmission on the interface. 3024 */ 3025 static void 3026 wm_stop(struct ifnet *ifp, int disable) 3027 { 3028 struct wm_softc *sc = ifp->if_softc; 3029 struct wm_txsoft *txs; 3030 int i; 3031 3032 /* Stop the one second clock. */ 3033 callout_stop(&sc->sc_tick_ch); 3034 3035 /* Stop the 82547 Tx FIFO stall check timer.
*/ 3036 if (sc->sc_type == WM_T_82547) 3037 callout_stop(&sc->sc_txfifo_ch); 3038 3039 if (sc->sc_flags & WM_F_HAS_MII) { 3040 /* Down the MII. */ 3041 mii_down(&sc->sc_mii); 3042 } 3043 3044 /* Stop the transmit and receive processes. */ 3045 CSR_WRITE(sc, WMREG_TCTL, 0); 3046 CSR_WRITE(sc, WMREG_RCTL, 0); 3047 3048 /* 3049 * Clear the interrupt mask to ensure the device cannot assert its 3050 * interrupt line. 3051 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service 3052 * any currently pending or shared interrupt. 3053 */ 3054 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3055 sc->sc_icr = 0; 3056 3057 /* Release any queued transmit buffers. */ 3058 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 3059 txs = &sc->sc_txsoft[i]; 3060 if (txs->txs_mbuf != NULL) { 3061 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3062 m_freem(txs->txs_mbuf); 3063 txs->txs_mbuf = NULL; 3064 } 3065 } 3066 3067 if (disable) 3068 wm_rxdrain(sc); 3069 3070 /* Mark the interface as down and cancel the watchdog timer. */ 3071 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3072 ifp->if_timer = 0; 3073 } 3074 3075 /* 3076 * wm_acquire_eeprom: 3077 * 3078 * Perform the EEPROM handshake required on some chips. 3079 */ 3080 static int 3081 wm_acquire_eeprom(struct wm_softc *sc) 3082 { 3083 uint32_t reg; 3084 int x; 3085 3086 /* Flash-type EEPROMs need no handshake; this always succeeds. */ 3087 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3088 return 0; 3089 3090 if (wm_get_eeprom_semaphore(sc)) 3091 return 1; 3092 3093 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3094 reg = CSR_READ(sc, WMREG_EECD); 3095 3096 /* Request EEPROM access. */ 3097 reg |= EECD_EE_REQ; 3098 CSR_WRITE(sc, WMREG_EECD, reg); 3099 3100 /* ...and wait for it to be granted. */ 3101 for (x = 0; x < 1000; x++) { 3102 reg = CSR_READ(sc, WMREG_EECD); 3103 if (reg & EECD_EE_GNT) 3104 break; 3105 delay(5); 3106 } 3107 if ((reg & EECD_EE_GNT) == 0) { 3108 aprint_error("%s: could not acquire EEPROM GNT\n", 3109 sc->sc_dev.dv_xname); 3110 reg &= ~EECD_EE_REQ; 3111 CSR_WRITE(sc, WMREG_EECD, reg); 3112 wm_put_eeprom_semaphore(sc); 3113 return (1); 3114 } 3115 } 3116 3117 return (0); 3118 } 3119 3120 /* 3121 * wm_release_eeprom: 3122 * 3123 * Release the EEPROM mutex. 3124 */ 3125 static void 3126 wm_release_eeprom(struct wm_softc *sc) 3127 { 3128 uint32_t reg; 3129 3130 /* Flash-type EEPROMs need no handshake. */ 3131 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3132 return; 3133 3134 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3135 reg = CSR_READ(sc, WMREG_EECD); 3136 reg &= ~EECD_EE_REQ; 3137 CSR_WRITE(sc, WMREG_EECD, reg); 3138 } 3139 3140 wm_put_eeprom_semaphore(sc); 3141 } 3142 3143 /* 3144 * wm_eeprom_sendbits: 3145 * 3146 * Send a series of bits to the EEPROM. 3147 */ 3148 static void 3149 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 3150 { 3151 uint32_t reg; 3152 int x; 3153 3154 reg = CSR_READ(sc, WMREG_EECD); 3155 3156 for (x = nbits; x > 0; x--) { 3157 if (bits & (1U << (x - 1))) 3158 reg |= EECD_DI; 3159 else 3160 reg &= ~EECD_DI; 3161 CSR_WRITE(sc, WMREG_EECD, reg); 3162 delay(2); 3163 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3164 delay(2); 3165 CSR_WRITE(sc, WMREG_EECD, reg); 3166 delay(2); 3167 } 3168 } 3169 3170 /* 3171 * wm_eeprom_recvbits: 3172 * 3173 * Receive a series of bits from the EEPROM.
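 * Bits arrive MSB-first: raise SK, sample DO while the clock is
 * high, then drop SK again, pausing 2us on each phase.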
3174 */ 3175 static void 3176 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 3177 { 3178 uint32_t reg, val; 3179 int x; 3180 3181 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 3182 3183 val = 0; 3184 for (x = nbits; x > 0; x--) { 3185 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3186 delay(2); 3187 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 3188 val |= (1U << (x - 1)); 3189 CSR_WRITE(sc, WMREG_EECD, reg); 3190 delay(2); 3191 } 3192 *valp = val; 3193 } 3194 3195 /* 3196 * wm_read_eeprom_uwire: 3197 * 3198 * Read a word from the EEPROM using the MicroWire protocol. 3199 */ 3200 static int 3201 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3202 { 3203 uint32_t reg, val; 3204 int i; 3205 3206 for (i = 0; i < wordcnt; i++) { 3207 /* Clear SK and DI. */ 3208 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 3209 CSR_WRITE(sc, WMREG_EECD, reg); 3210 3211 /* Set CHIP SELECT. */ 3212 reg |= EECD_CS; 3213 CSR_WRITE(sc, WMREG_EECD, reg); 3214 delay(2); 3215 3216 /* Shift in the READ command. */ 3217 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 3218 3219 /* Shift in address. */ 3220 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits); 3221 3222 /* Shift out the data. */ 3223 wm_eeprom_recvbits(sc, &val, 16); 3224 data[i] = val & 0xffff; 3225 3226 /* Clear CHIP SELECT. */ 3227 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 3228 CSR_WRITE(sc, WMREG_EECD, reg); 3229 delay(2); 3230 } 3231 3232 return (0); 3233 } 3234 3235 /* 3236 * wm_spi_eeprom_ready: 3237 * 3238 * Wait for a SPI EEPROM to be ready for commands. 3239 */ 3240 static int 3241 wm_spi_eeprom_ready(struct wm_softc *sc) 3242 { 3243 uint32_t val; 3244 int usec; 3245 3246 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { 3247 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); 3248 wm_eeprom_recvbits(sc, &val, 8); 3249 if ((val & SPI_SR_RDY) == 0) 3250 break; 3251 } 3252 if (usec >= SPI_MAX_RETRIES) { 3253 aprint_error("%s: EEPROM failed to become ready\n", 3254 sc->sc_dev.dv_xname); 3255 return (1); 3256 } 3257 return (0); 3258 } 3259 3260 /* 3261 * wm_read_eeprom_spi: 3262 * 3263 * Read a word from the EEPROM using the SPI protocol. 3264 */ 3265 static int 3266 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3267 { 3268 uint32_t reg, val; 3269 int i; 3270 uint8_t opc; 3271 3272 /* Clear SK and CS. */ 3273 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); 3274 CSR_WRITE(sc, WMREG_EECD, reg); 3275 delay(2); 3276 3277 if (wm_spi_eeprom_ready(sc)) 3278 return (1); 3279 3280 /* Toggle CS to flush commands. */ 3281 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); 3282 delay(2); 3283 CSR_WRITE(sc, WMREG_EECD, reg); 3284 delay(2); 3285 3286 opc = SPI_OPC_READ; 3287 if (sc->sc_ee_addrbits == 8 && word >= 128) 3288 opc |= SPI_OPC_A8; 3289 3290 wm_eeprom_sendbits(sc, opc, 8); 3291 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits); 3292 3293 for (i = 0; i < wordcnt; i++) { 3294 wm_eeprom_recvbits(sc, &val, 16); 3295 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); 3296 } 3297 3298 /* Raise CS and clear SK. */ 3299 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 3300 CSR_WRITE(sc, WMREG_EECD, reg); 3301 delay(2); 3302 3303 return (0); 3304 } 3305 3306 #define EEPROM_CHECKSUM 0xBABA 3307 #define EEPROM_SIZE 0x0040 3308 3309 /* 3310 * wm_validate_eeprom_checksum: 3311 * 3312 * The checksum is defined as the sum of the first 64 (16 bit) words.
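 * The words must sum to EEPROM_CHECKSUM (0xBABA) modulo 2^16; the
 * NVM image conventionally stores a compensating checksum word
 * (at offset 0x3f on these parts) to make the sum come out right.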
3313 */ 3314 static int 3315 wm_validate_eeprom_checksum(struct wm_softc *sc) 3316 { 3317 uint16_t checksum; 3318 uint16_t eeprom_data; 3319 int i; 3320 3321 checksum = 0; 3322 3323 for (i = 0; i < EEPROM_SIZE; i++) { 3324 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 3325 return 1; 3326 checksum += eeprom_data; 3327 } 3328 3329 if (checksum != (uint16_t) EEPROM_CHECKSUM) 3330 return 1; 3331 3332 return 0; 3333 } 3334 3335 /* 3336 * wm_read_eeprom: 3337 * 3338 * Read data from the serial EEPROM. 3339 */ 3340 static int 3341 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3342 { 3343 int rv; 3344 3345 if (sc->sc_flags & WM_F_EEPROM_INVALID) 3346 return 1; 3347 3348 if (wm_acquire_eeprom(sc)) 3349 return 1; 3350 3351 if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 3352 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data); 3353 else if (sc->sc_flags & WM_F_EEPROM_SPI) 3354 rv = wm_read_eeprom_spi(sc, word, wordcnt, data); 3355 else 3356 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data); 3357 3358 wm_release_eeprom(sc); 3359 return rv; 3360 } 3361 3362 static int 3363 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt, 3364 uint16_t *data) 3365 { 3366 int i, eerd = 0; 3367 int error = 0; 3368 3369 for (i = 0; i < wordcnt; i++) { 3370 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 3371 3372 CSR_WRITE(sc, WMREG_EERD, eerd); 3373 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 3374 if (error != 0) 3375 break; 3376 3377 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 3378 } 3379 3380 return error; 3381 } 3382 3383 static int 3384 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 3385 { 3386 uint32_t attempts = 100000; 3387 uint32_t i, reg = 0; 3388 int32_t done = -1; 3389 3390 for (i = 0; i < attempts; i++) { 3391 reg = CSR_READ(sc, rw); 3392 3393 if (reg & EERD_DONE) { 3394 done = 0; 3395 break; 3396 } 3397 delay(5); 3398 } 3399 3400 return done; 3401 } 3402 3403 /* 3404 * wm_add_rxbuf: 3405 * 3406 * Add a receive buffer to the indicated descriptor. 3407 */ 3408 static int 3409 wm_add_rxbuf(struct wm_softc *sc, int idx) 3410 { 3411 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx]; 3412 struct mbuf *m; 3413 int error; 3414 3415 MGETHDR(m, M_DONTWAIT, MT_DATA); 3416 if (m == NULL) 3417 return (ENOBUFS); 3418 3419 MCLGET(m, M_DONTWAIT); 3420 if ((m->m_flags & M_EXT) == 0) { 3421 m_freem(m); 3422 return (ENOBUFS); 3423 } 3424 3425 if (rxs->rxs_mbuf != NULL) 3426 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 3427 3428 rxs->rxs_mbuf = m; 3429 3430 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3431 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, 3432 BUS_DMA_READ|BUS_DMA_NOWAIT); 3433 if (error) { 3434 /* XXX XXX XXX */ 3435 printf("%s: unable to load rx DMA map %d, error = %d\n", 3436 sc->sc_dev.dv_xname, idx, error); 3437 panic("wm_add_rxbuf"); 3438 } 3439 3440 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3441 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 3442 3443 WM_INIT_RXDESC(sc, idx); 3444 3445 return (0); 3446 } 3447 3448 /* 3449 * wm_set_ral: 3450 * 3451 * Set an entry in the receive address list.
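 * The six address octets are packed little-endian into the RAL
 * (low four) and RAH (high two) registers; RAL_AV in the high word
 * marks the slot valid, and a NULL enaddr clears the slot.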
3452 */ 3453 static void 3454 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3455 { 3456 uint32_t ral_lo, ral_hi; 3457 3458 if (enaddr != NULL) { 3459 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 3460 (enaddr[3] << 24); 3461 ral_hi = enaddr[4] | (enaddr[5] << 8); 3462 ral_hi |= RAL_AV; 3463 } else { 3464 ral_lo = 0; 3465 ral_hi = 0; 3466 } 3467 3468 if (sc->sc_type >= WM_T_82544) { 3469 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 3470 ral_lo); 3471 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 3472 ral_hi); 3473 } else { 3474 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 3475 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 3476 } 3477 } 3478 3479 /* 3480 * wm_mchash: 3481 * 3482 * Compute the hash of the multicast address for the 4096-bit 3483 * multicast filter. 3484 */ 3485 static uint32_t 3486 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3487 { 3488 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3489 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3490 uint32_t hash; 3491 3492 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3493 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3494 3495 return (hash & 0xfff); 3496 } 3497 3498 /* 3499 * wm_set_filter: 3500 * 3501 * Set up the receive filter. 3502 */ 3503 static void 3504 wm_set_filter(struct wm_softc *sc) 3505 { 3506 struct ethercom *ec = &sc->sc_ethercom; 3507 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3508 struct ether_multi *enm; 3509 struct ether_multistep step; 3510 bus_addr_t mta_reg; 3511 uint32_t hash, reg, bit; 3512 int i; 3513 3514 if (sc->sc_type >= WM_T_82544) 3515 mta_reg = WMREG_CORDOVA_MTA; 3516 else 3517 mta_reg = WMREG_MTA; 3518 3519 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3520 3521 if (ifp->if_flags & IFF_BROADCAST) 3522 sc->sc_rctl |= RCTL_BAM; 3523 if (ifp->if_flags & IFF_PROMISC) { 3524 sc->sc_rctl |= RCTL_UPE; 3525 goto allmulti; 3526 } 3527 3528 /* 3529 * Set the station address in the first RAL slot, and 3530 * clear the remaining slots. 3531 */ 3532 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0); 3533 for (i = 1; i < WM_RAL_TABSIZE; i++) 3534 wm_set_ral(sc, NULL, i); 3535 3536 /* Clear out the multicast table. */ 3537 for (i = 0; i < WM_MC_TABSIZE; i++) 3538 CSR_WRITE(sc, mta_reg + (i << 2), 0); 3539 3540 ETHER_FIRST_MULTI(step, ec, enm); 3541 while (enm != NULL) { 3542 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3543 /* 3544 * We must listen to a range of multicast addresses. 3545 * For now, just accept all multicasts, rather than 3546 * trying to set only those filter bits needed to match 3547 * the range. (At this time, the only use of address 3548 * ranges is for IP multicast routing, for which the 3549 * range is big enough to require all bits set.) 3550 */ 3551 goto allmulti; 3552 } 3553 3554 hash = wm_mchash(sc, enm->enm_addrlo); 3555 3556 reg = (hash >> 5) & 0x7f; 3557 bit = hash & 0x1f; 3558 3559 hash = CSR_READ(sc, mta_reg + (reg << 2)); 3560 hash |= 1U << bit; 3561 3562 /* XXX Hardware bug?? 
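 * XXX On the 82544, writing certain MTA offsets seems to disturb
 * XXX the preceding register, so we rewrite mta_reg[reg - 1] with
 * XXX its previous contents immediately afterwards.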
*/ 3563 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { 3564 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 3565 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3566 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 3567 } else 3568 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3569 3570 ETHER_NEXT_MULTI(step, enm); 3571 } 3572 3573 ifp->if_flags &= ~IFF_ALLMULTI; 3574 goto setit; 3575 3576 allmulti: 3577 ifp->if_flags |= IFF_ALLMULTI; 3578 sc->sc_rctl |= RCTL_MPE; 3579 3580 setit: 3581 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 3582 } 3583 3584 /* 3585 * wm_tbi_mediainit: 3586 * 3587 * Initialize media for use on 1000BASE-X devices. 3588 */ 3589 static void 3590 wm_tbi_mediainit(struct wm_softc *sc) 3591 { 3592 const char *sep = ""; 3593 3594 if (sc->sc_type < WM_T_82543) 3595 sc->sc_tipg = TIPG_WM_DFLT; 3596 else 3597 sc->sc_tipg = TIPG_LG_DFLT; 3598 3599 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange, 3600 wm_tbi_mediastatus); 3601 3602 /* 3603 * SWD Pins: 3604 * 3605 * 0 = Link LED (output) 3606 * 1 = Loss Of Signal (input) 3607 */ 3608 sc->sc_ctrl |= CTRL_SWDPIO(0); 3609 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 3610 3611 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3612 3613 #define ADD(ss, mm, dd) \ 3614 do { \ 3615 aprint_normal("%s%s", sep, ss); \ 3616 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \ 3617 sep = ", "; \ 3618 } while (/*CONSTCOND*/0) 3619 3620 aprint_normal("%s: ", sc->sc_dev.dv_xname); 3621 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 3622 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD); 3623 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD); 3624 aprint_normal("\n"); 3625 3626 #undef ADD 3627 3628 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 3629 } 3630 3631 /* 3632 * wm_tbi_mediastatus: [ifmedia interface function] 3633 * 3634 * Get the current interface media status on a 1000BASE-X device. 3635 */ 3636 static void 3637 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 3638 { 3639 struct wm_softc *sc = ifp->if_softc; 3640 uint32_t ctrl; 3641 3642 ifmr->ifm_status = IFM_AVALID; 3643 ifmr->ifm_active = IFM_ETHER; 3644 3645 if (sc->sc_tbi_linkup == 0) { 3646 ifmr->ifm_active |= IFM_NONE; 3647 return; 3648 } 3649 3650 ifmr->ifm_status |= IFM_ACTIVE; 3651 ifmr->ifm_active |= IFM_1000_SX; 3652 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 3653 ifmr->ifm_active |= IFM_FDX; 3654 ctrl = CSR_READ(sc, WMREG_CTRL); 3655 if (ctrl & CTRL_RFCE) 3656 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 3657 if (ctrl & CTRL_TFCE) 3658 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 3659 } 3660 3661 /* 3662 * wm_tbi_mediachange: [ifmedia interface function] 3663 * 3664 * Set hardware to newly-selected media on a 1000BASE-X device. 3665 */ 3666 static int 3667 wm_tbi_mediachange(struct ifnet *ifp) 3668 { 3669 struct wm_softc *sc = ifp->if_softc; 3670 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 3671 uint32_t status; 3672 int i; 3673 3674 sc->sc_txcw = ife->ifm_data; 3675 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO || 3676 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 3677 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM; 3678 sc->sc_txcw |= TXCW_ANE; 3679 3680 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 3681 delay(10000); 3682 3683 /* NOTE: CTRL will update TFCE and RFCE automatically. */ 3684 3685 sc->sc_tbi_anstate = 0; 3686 3687 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) { 3688 /* Have signal; wait for the link to come up. 
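 * CTRL_SWDPIN(1) is the Loss Of Signal input configured in
 * wm_tbi_mediainit(), so a clear bit means we have signal; poll
 * STATUS_LU for up to 500ms.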
*/ 3689 for (i = 0; i < 50; i++) { 3690 delay(10000); 3691 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 3692 break; 3693 } 3694 3695 status = CSR_READ(sc, WMREG_STATUS); 3696 if (status & STATUS_LU) { 3697 /* Link is up. */ 3698 DPRINTF(WM_DEBUG_LINK, 3699 ("%s: LINK: set media -> link up %s\n", 3700 sc->sc_dev.dv_xname, 3701 (status & STATUS_FD) ? "FDX" : "HDX")); 3702 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3703 sc->sc_fcrtl &= ~FCRTL_XONE; 3704 if (status & STATUS_FD) 3705 sc->sc_tctl |= 3706 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3707 else 3708 sc->sc_tctl |= 3709 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3710 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 3711 sc->sc_fcrtl |= FCRTL_XONE; 3712 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3713 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3714 WMREG_OLD_FCRTL : WMREG_FCRTL, 3715 sc->sc_fcrtl); 3716 sc->sc_tbi_linkup = 1; 3717 } else { 3718 /* Link is down. */ 3719 DPRINTF(WM_DEBUG_LINK, 3720 ("%s: LINK: set media -> link down\n", 3721 sc->sc_dev.dv_xname)); 3722 sc->sc_tbi_linkup = 0; 3723 } 3724 } else { 3725 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 3726 sc->sc_dev.dv_xname)); 3727 sc->sc_tbi_linkup = 0; 3728 } 3729 3730 wm_tbi_set_linkled(sc); 3731 3732 return (0); 3733 } 3734 3735 /* 3736 * wm_tbi_set_linkled: 3737 * 3738 * Update the link LED on 1000BASE-X devices. 3739 */ 3740 static void 3741 wm_tbi_set_linkled(struct wm_softc *sc) 3742 { 3743 3744 if (sc->sc_tbi_linkup) 3745 sc->sc_ctrl |= CTRL_SWDPIN(0); 3746 else 3747 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 3748 3749 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3750 } 3751 3752 /* 3753 * wm_tbi_check_link: 3754 * 3755 * Check the link on 1000BASE-X devices. 3756 */ 3757 static void 3758 wm_tbi_check_link(struct wm_softc *sc) 3759 { 3760 uint32_t rxcw, ctrl, status; 3761 3762 if (sc->sc_tbi_anstate == 0) 3763 return; 3764 else if (sc->sc_tbi_anstate > 1) { 3765 DPRINTF(WM_DEBUG_LINK, 3766 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname, 3767 sc->sc_tbi_anstate)); 3768 sc->sc_tbi_anstate--; 3769 return; 3770 } 3771 3772 sc->sc_tbi_anstate = 0; 3773 3774 rxcw = CSR_READ(sc, WMREG_RXCW); 3775 ctrl = CSR_READ(sc, WMREG_CTRL); 3776 status = CSR_READ(sc, WMREG_STATUS); 3777 3778 if ((status & STATUS_LU) == 0) { 3779 DPRINTF(WM_DEBUG_LINK, 3780 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname)); 3781 sc->sc_tbi_linkup = 0; 3782 } else { 3783 DPRINTF(WM_DEBUG_LINK, 3784 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname, 3785 (status & STATUS_FD) ? "FDX" : "HDX")); 3786 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3787 sc->sc_fcrtl &= ~FCRTL_XONE; 3788 if (status & STATUS_FD) 3789 sc->sc_tctl |= 3790 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3791 else 3792 sc->sc_tctl |= 3793 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3794 if (ctrl & CTRL_TFCE) 3795 sc->sc_fcrtl |= FCRTL_XONE; 3796 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3797 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3798 WMREG_OLD_FCRTL : WMREG_FCRTL, 3799 sc->sc_fcrtl); 3800 sc->sc_tbi_linkup = 1; 3801 } 3802 3803 wm_tbi_set_linkled(sc); 3804 } 3805 3806 /* 3807 * wm_gmii_reset: 3808 * 3809 * Reset the PHY. 3810 */ 3811 static void 3812 wm_gmii_reset(struct wm_softc *sc) 3813 { 3814 uint32_t reg; 3815 3816 if (sc->sc_type >= WM_T_82544) { 3817 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 3818 delay(20000); 3819 3820 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3821 delay(20000); 3822 } else { 3823 /* The PHY reset pin is active-low. 
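 * Configure SWDPIN(4) as an output and pulse it high-low-high,
 * 10us per step, to generate the reset edge.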
*/ 3824 reg = CSR_READ(sc, WMREG_CTRL_EXT); 3825 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 3826 CTRL_EXT_SWDPIN(4)); 3827 reg |= CTRL_EXT_SWDPIO(4); 3828 3829 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 3830 delay(10); 3831 3832 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 3833 delay(10); 3834 3835 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 3836 delay(10); 3837 #if 0 3838 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 3839 #endif 3840 } 3841 } 3842 3843 /* 3844 * wm_gmii_mediainit: 3845 * 3846 * Initialize media for use on 1000BASE-T devices. 3847 */ 3848 static void 3849 wm_gmii_mediainit(struct wm_softc *sc) 3850 { 3851 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3852 3853 /* We have MII. */ 3854 sc->sc_flags |= WM_F_HAS_MII; 3855 3856 sc->sc_tipg = TIPG_1000T_DFLT; 3857 3858 /* 3859 * Let the chip set speed/duplex on its own based on 3860 * signals from the PHY. 3861 */ 3862 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE; 3863 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3864 3865 /* Initialize our media structures and probe the GMII. */ 3866 sc->sc_mii.mii_ifp = ifp; 3867 3868 if (sc->sc_type >= WM_T_82544) { 3869 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg; 3870 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg; 3871 } else { 3872 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg; 3873 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg; 3874 } 3875 sc->sc_mii.mii_statchg = wm_gmii_statchg; 3876 3877 wm_gmii_reset(sc); 3878 3879 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange, 3880 wm_gmii_mediastatus); 3881 3882 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 3883 MII_OFFSET_ANY, MIIF_DOPAUSE); 3884 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 3885 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 3886 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 3887 } else 3888 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 3889 } 3890 3891 /* 3892 * wm_gmii_mediastatus: [ifmedia interface function] 3893 * 3894 * Get the current interface media status on a 1000BASE-T device. 3895 */ 3896 static void 3897 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 3898 { 3899 struct wm_softc *sc = ifp->if_softc; 3900 3901 mii_pollstat(&sc->sc_mii); 3902 ifmr->ifm_status = sc->sc_mii.mii_media_status; 3903 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) | 3904 sc->sc_flowflags; 3905 } 3906 3907 /* 3908 * wm_gmii_mediachange: [ifmedia interface function] 3909 * 3910 * Set hardware to newly-selected media on a 1000BASE-T device. 
3911 */ 3912 static int 3913 wm_gmii_mediachange(struct ifnet *ifp) 3914 { 3915 struct wm_softc *sc = ifp->if_softc; 3916 3917 if (ifp->if_flags & IFF_UP) 3918 mii_mediachg(&sc->sc_mii); 3919 return (0); 3920 } 3921 3922 #define MDI_IO CTRL_SWDPIN(2) 3923 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 3924 #define MDI_CLK CTRL_SWDPIN(3) 3925 3926 static void 3927 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 3928 { 3929 uint32_t i, v; 3930 3931 v = CSR_READ(sc, WMREG_CTRL); 3932 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 3933 v |= MDI_DIR | CTRL_SWDPIO(3); 3934 3935 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 3936 if (data & i) 3937 v |= MDI_IO; 3938 else 3939 v &= ~MDI_IO; 3940 CSR_WRITE(sc, WMREG_CTRL, v); 3941 delay(10); 3942 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3943 delay(10); 3944 CSR_WRITE(sc, WMREG_CTRL, v); 3945 delay(10); 3946 } 3947 } 3948 3949 static uint32_t 3950 i82543_mii_recvbits(struct wm_softc *sc) 3951 { 3952 uint32_t v, i, data = 0; 3953 3954 v = CSR_READ(sc, WMREG_CTRL); 3955 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 3956 v |= CTRL_SWDPIO(3); 3957 3958 CSR_WRITE(sc, WMREG_CTRL, v); 3959 delay(10); 3960 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3961 delay(10); 3962 CSR_WRITE(sc, WMREG_CTRL, v); 3963 delay(10); 3964 3965 for (i = 0; i < 16; i++) { 3966 data <<= 1; 3967 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3968 delay(10); 3969 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 3970 data |= 1; 3971 CSR_WRITE(sc, WMREG_CTRL, v); 3972 delay(10); 3973 } 3974 3975 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3976 delay(10); 3977 CSR_WRITE(sc, WMREG_CTRL, v); 3978 delay(10); 3979 3980 return (data); 3981 } 3982 3983 #undef MDI_IO 3984 #undef MDI_DIR 3985 #undef MDI_CLK 3986 3987 /* 3988 * wm_gmii_i82543_readreg: [mii interface function] 3989 * 3990 * Read a PHY register on the GMII (i82543 version). 3991 */ 3992 static int 3993 wm_gmii_i82543_readreg(struct device *self, int phy, int reg) 3994 { 3995 struct wm_softc *sc = (void *) self; 3996 int rv; 3997 3998 i82543_mii_sendbits(sc, 0xffffffffU, 32); 3999 i82543_mii_sendbits(sc, reg | (phy << 5) | 4000 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 4001 rv = i82543_mii_recvbits(sc) & 0xffff; 4002 4003 DPRINTF(WM_DEBUG_GMII, 4004 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 4005 sc->sc_dev.dv_xname, phy, reg, rv)); 4006 4007 return (rv); 4008 } 4009 4010 /* 4011 * wm_gmii_i82543_writereg: [mii interface function] 4012 * 4013 * Write a PHY register on the GMII (i82543 version). 4014 */ 4015 static void 4016 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val) 4017 { 4018 struct wm_softc *sc = (void *) self; 4019 4020 i82543_mii_sendbits(sc, 0xffffffffU, 32); 4021 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 4022 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 4023 (MII_COMMAND_START << 30), 32); 4024 } 4025 4026 /* 4027 * wm_gmii_i82544_readreg: [mii interface function] 4028 * 4029 * Read a PHY register on the GMII. 
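 * The i82544 and later run the MDIO cycle in hardware through the
 * MDIC register: post the read, poll up to ~1ms for MDIC_READY,
 * and check MDIC_E for a failed transaction.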
/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get the flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
}
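
/*
 * The MDIC_READY poll loop appears verbatim in both
 * wm_gmii_i82544_readreg() and wm_gmii_i82544_writereg() above.  A
 * minimal sketch of a shared helper (hypothetical, not part of the
 * driver) might look like this:
 */
#if 0
static int
wm_mdic_wait_ready(struct wm_softc *sc, uint32_t *mdicp)
{
	uint32_t mdic = 0;
	int i;

	/* Poll for up to 100 * 10us, as the callers above do. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}
	*mdicp = mdic;
	return ((mdic & MDIC_READY) != 0);
}
#endif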
/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Determine whether the NVM is an on-board EEPROM (1) or,
 *	on the 82573 only, Flash (0).
 */
static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 & 16. */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, the device is Flash type. */
		if (eecd == 0x03) {
			return 0;
		}
	}
	return 1;
}

/*
 * wm_get_eeprom_semaphore:
 *
 *	Acquire the software/firmware EEPROM semaphore.  Returns 0
 *	on success, 1 if the hardware never granted it.
 */
static int
wm_get_eeprom_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	if ((sc->sc_flags & WM_F_EEPROM_SEMAPHORE) == 0)
		return 0;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit, we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		/* Timed out; release the semaphore bit in case it was set. */
		wm_put_eeprom_semaphore(sc);
		return 1;
	}

	return 0;
}

/*
 * wm_put_eeprom_semaphore:
 *
 *	Release the software/firmware EEPROM semaphore.
 */
static void
wm_put_eeprom_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	if ((sc->sc_flags & WM_F_EEPROM_SEMAPHORE) == 0)
		return;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}
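
/*
 * Illustrative fragment only (not compiled): callers are expected to
 * bracket NVM access with the two routines above, along these lines:
 */
#if 0
	if (wm_get_eeprom_semaphore(sc))
		return 1;	/* hardware never granted the semaphore */
	/* ... access the EEPROM/NVM registers here ... */
	wm_put_eeprom_semaphore(sc);
#endif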