/*	$NetBSD: if_wm.c,v 1.126 2006/10/12 01:31:30 christos Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.126 2006/10/12 01:31:30 christos Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
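/*
 * Both descriptor rings are sized to powers of two, so advancing an
 * index is a mask, not a modulo.  For example, with WM_NTXDESC(sc)
 * == 4096, WM_NEXTTX(sc, 4095) == (4096 & 4095) == 0: the index
 * wraps to the start of the ring without a branch or a division.
 */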
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
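/*
 * These offsets are relative to the start of the control-data clump;
 * adding sc_cddma (the clump's bus address; see WM_CDTXADDR below)
 * yields the bus address the chip sees.  E.g. WM_CDTXOFF(5) is the
 * byte offset of the sixth Tx descriptor within the clump.
 */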
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	struct callout sc_txfifo_ch;	/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};
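/*
 * The Rx chain macros below keep a tail pointer (sc_rxtailp) aimed at
 * the m_next slot of the last mbuf in the chain (or at sc_rxhead when
 * the chain is empty), so appending a buffer to a multi-buffer jumbo
 * packet is O(1): store the new mbuf through *sc_rxtailp and advance
 * the tail pointer.
 */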
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x080	/* I/O handle is valid */
#define	WM_F_BUS64		0x100	/* bus is 64-bit */
#define	WM_F_PCIX		0x200	/* bus is PCI-X */
#define	WM_F_CSA		0x400	/* bus is CSA */
#define	WM_F_PCIE		0x800	/* bus is PCI-Express */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
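/*
 * WM_CDTXSYNC must cope with a range that wraps past the end of the
 * ring.  Example: syncing 4 descriptors starting at index 4094 in a
 * 4096-entry ring is done as two bus_dmamap_sync() calls, one for
 * [4094, 4096) and one for [0, 2).
 */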
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the	\
	 * 2K size option, but what we REALLY want is (2K - 2)!  For	\
	 * this reason, we can't "scoot" packets longer than the	\
	 * standard Ethernet MTU.  On strict-alignment platforms, if	\
	 * the total size exceeds (2K - 2) we set align_tweak to 0	\
	 * and let the upper layer copy the headers.			\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
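/*
 * Concretely: with sc_align_tweak == 2, a 2K cluster offers 2046
 * usable bytes, which still fits a standard 1518-byte frame but not
 * a jumbo frame; when the configured MTU needs more than (2K - 2)
 * bytes per buffer, sc_align_tweak stays 0 (it is chosen when the
 * interface is initialized).
 */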
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_eeprom_semaphore(struct wm_softc *);
static void	wm_put_eeprom_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);
static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
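/*
 * The I/O BAR provides indirect register access through a pair of
 * 32-bit windows: the register offset is written at BAR offset 0,
 * then the datum moves through the window at offset 4.  That is all
 * the two helpers below do.
 */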
#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
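/*
 * Descriptor addresses are always 64 bits on the wire, stored
 * little-endian as a low/high pair; wm_set_dma_addr() above writes
 * zero for the high word on 32-bit bus_addr_t platforms.  The
 * sizeof() test is resolved at compile time, so it costs nothing at
 * run time.
 */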
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent __unused, struct cfdata *cf __unused, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent __unused, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE | WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
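	/*
	 * The (bus_size_t)0x100000000ULL "boundary" argument passed to
	 * bus_dmamem_alloc() below is what enforces the NOTE above:
	 * the allocation may not cross a 4GB boundary, so every
	 * descriptor in the clump shares the same upper 32 address
	 * bits.
	 */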
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL, 0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
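	/*
	 * The address width determines the device's capacity in
	 * 16-bit words: a 6-bit MicroWire part holds 64 words, an
	 * 8-bit part 256, and a 16-bit SPI part up to 65536.  This is
	 * what the "%u word" line below reports (1U << sc_ee_addrbits).
	 */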
	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
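	/*
	 * Dual-port parts share one EEPROM between both PCI functions,
	 * so only one MAC address is stored; the second function
	 * derives its own by flipping the low bit, which is what the
	 * fixup below does when STATUS reports that we are function 1.
	 */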
	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}
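	/*
	 * TBI (ten-bit interface) means the MAC drives an internal
	 * SERDES for 1000BASE-X fiber; GMII means an external copper
	 * PHY.  STATUS_TBIMODE reflects how the board is actually
	 * strapped, so it takes precedence over the product table,
	 * which is why we warn when the two disagree.
	 */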
	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;

	/*
	 * If we're a i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
		ifp->if_capabilities |= IFCAP_TSOv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif
#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    wm_powerhook, sc);
	if (sc->sc_powerhook == NULL)
		aprint_error("%s: can't establish powerhook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

static void
wm_powerhook(int why, void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;

	switch (why) {
	case PWR_SOFTSUSPEND:
		wm_shutdown(sc);
		break;
	case PWR_SOFTRESUME:
		ifp->if_flags &= ~IFF_RUNNING;
		wm_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			wm_start(ifp);
		break;
	case PWR_SUSPEND:
		pci_conf_capture(pc, tag, &sc->sc_pciconf);
		break;
	case PWR_RESUME:
		pci_conf_restore(pc, tag, &sc->sc_pciconf);
		break;
	}

	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;
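	/*
	 * For TSO the chip replicates the template headers into every
	 * segment, recomputing ip_len and the TCP checksum as it goes,
	 * so the headers handed to it must carry ip_len == 0 and a TCP
	 * checksum preseeded with the pseudo-header sum (addresses and
	 * protocol only, no length).  Both paths below arrange exactly
	 * that.
	 */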
1577 */ 1578 struct ip ip; 1579 struct tcphdr th; 1580 1581 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 1582 1583 m_copydata(m0, offset, sizeof(ip), &ip); 1584 m_copydata(m0, hlen, sizeof(th), &th); 1585 1586 ip.ip_len = 0; 1587 1588 m_copyback(m0, hlen + offsetof(struct ip, ip_len), 1589 sizeof(ip.ip_len), &ip.ip_len); 1590 1591 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 1592 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 1593 1594 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 1595 sizeof(th.th_sum), &th.th_sum); 1596 1597 hlen += th.th_off << 2; 1598 } else { 1599 /* 1600 * TCP/IP headers are in the first mbuf; we can do 1601 * this the easy way. 1602 */ 1603 struct ip *ip = 1604 (struct ip *) (mtod(m0, caddr_t) + offset); 1605 struct tcphdr *th = 1606 (struct tcphdr *) (mtod(m0, caddr_t) + hlen); 1607 1608 ip->ip_len = 0; 1609 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1610 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1611 1612 hlen += th->th_off << 2; 1613 } 1614 1615 cmd |= WTX_TCPIP_CMD_TSE; 1616 cmdlen |= WTX_TCPIP_CMD_TSE | WTX_TCPIP_CMD_IP | 1617 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1618 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1619 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1620 } 1621 1622 /* 1623 * NOTE: Even if we're not using the IP or TCP/UDP checksum 1624 * offload feature, if we load the context descriptor, we 1625 * MUST provide valid values for IPCSS and TUCSS fields. 1626 */ 1627 1628 ipcs = WTX_TCPIP_IPCSS(offset) | 1629 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1630 WTX_TCPIP_IPCSE(offset + iphl - 1); 1631 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1632 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1633 fields |= WTX_IXSM; 1634 } 1635 1636 offset += iphl; 1637 1638 if (m0->m_pkthdr.csum_flags & 1639 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1640 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1641 fields |= WTX_TXSM; 1642 tucs = WTX_TCPIP_TUCSS(offset) | 1643 WTX_TCPIP_TUCSO(offset + 1644 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1645 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1646 } else if ((m0->m_pkthdr.csum_flags & 1647 (M_CSUM_TCPv6|M_CSUM_UDPv6)) != 0) { 1648 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1649 fields |= WTX_TXSM; 1650 tucs = WTX_TCPIP_TUCSS(offset) | 1651 WTX_TCPIP_TUCSO(offset + 1652 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1653 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1654 } else { 1655 /* Just initialize it to a valid TCP context. */ 1656 tucs = WTX_TCPIP_TUCSS(offset) | 1657 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1658 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1659 } 1660 1661 /* Fill in the context descriptor. */ 1662 t = (struct livengood_tcpip_ctxdesc *) 1663 &sc->sc_txdescs[sc->sc_txnext]; 1664 t->tcpip_ipcs = htole32(ipcs); 1665 t->tcpip_tucs = htole32(tucs); 1666 t->tcpip_cmdlen = htole32(cmdlen); 1667 t->tcpip_seg = htole32(seg); 1668 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 1669 1670 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 1671 txs->txs_ndesc++; 1672 1673 *cmdp = cmd; 1674 *fieldsp = fields; 1675 1676 return (0); 1677 } 1678 1679 static void 1680 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 1681 { 1682 struct mbuf *m; 1683 int i; 1684 1685 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname); 1686 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 1687 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 1688 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname, 1689 m->m_data, m->m_len, m->m_flags); 1690 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname, 1691 i, i == 1 ? 
"" : "s"); 1692 } 1693 1694 /* 1695 * wm_82547_txfifo_stall: 1696 * 1697 * Callout used to wait for the 82547 Tx FIFO to drain, 1698 * reset the FIFO pointers, and restart packet transmission. 1699 */ 1700 static void 1701 wm_82547_txfifo_stall(void *arg) 1702 { 1703 struct wm_softc *sc = arg; 1704 int s; 1705 1706 s = splnet(); 1707 1708 if (sc->sc_txfifo_stall) { 1709 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) && 1710 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 1711 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 1712 /* 1713 * Packets have drained. Stop transmitter, reset 1714 * FIFO pointers, restart transmitter, and kick 1715 * the packet queue. 1716 */ 1717 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 1718 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 1719 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr); 1720 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr); 1721 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr); 1722 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr); 1723 CSR_WRITE(sc, WMREG_TCTL, tctl); 1724 CSR_WRITE_FLUSH(sc); 1725 1726 sc->sc_txfifo_head = 0; 1727 sc->sc_txfifo_stall = 0; 1728 wm_start(&sc->sc_ethercom.ec_if); 1729 } else { 1730 /* 1731 * Still waiting for packets to drain; try again in 1732 * another tick. 1733 */ 1734 callout_schedule(&sc->sc_txfifo_ch, 1); 1735 } 1736 } 1737 1738 splx(s); 1739 } 1740 1741 /* 1742 * wm_82547_txfifo_bugchk: 1743 * 1744 * Check for bug condition in the 82547 Tx FIFO. We need to 1745 * prevent enqueueing a packet that would wrap around the end 1746 * if the Tx FIFO ring buffer, otherwise the chip will croak. 1747 * 1748 * We do this by checking the amount of space before the end 1749 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 1750 * the Tx FIFO, wait for all remaining packets to drain, reset 1751 * the internal FIFO pointers to the beginning, and restart 1752 * transmission on the interface. 1753 */ 1754 #define WM_FIFO_HDR 0x10 1755 #define WM_82547_PAD_LEN 0x3e0 1756 static int 1757 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 1758 { 1759 int space = sc->sc_txfifo_size - sc->sc_txfifo_head; 1760 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 1761 1762 /* Just return if already stalled. */ 1763 if (sc->sc_txfifo_stall) 1764 return (1); 1765 1766 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1767 /* Stall only occurs in half-duplex mode. */ 1768 goto send_packet; 1769 } 1770 1771 if (len >= WM_82547_PAD_LEN + space) { 1772 sc->sc_txfifo_stall = 1; 1773 callout_schedule(&sc->sc_txfifo_ch, 1); 1774 return (1); 1775 } 1776 1777 send_packet: 1778 sc->sc_txfifo_head += len; 1779 if (sc->sc_txfifo_head >= sc->sc_txfifo_size) 1780 sc->sc_txfifo_head -= sc->sc_txfifo_size; 1781 1782 return (0); 1783 } 1784 1785 /* 1786 * wm_start: [ifnet interface function] 1787 * 1788 * Start packet transmission on the interface. 1789 */ 1790 static void 1791 wm_start(struct ifnet *ifp) 1792 { 1793 struct wm_softc *sc = ifp->if_softc; 1794 struct mbuf *m0; 1795 #if 0 /* XXXJRT */ 1796 struct m_tag *mtag; 1797 #endif 1798 struct wm_txsoft *txs; 1799 bus_dmamap_t dmamap; 1800 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; 1801 bus_addr_t curaddr; 1802 bus_size_t seglen, curlen; 1803 uint32_t cksumcmd; 1804 uint8_t cksumfields; 1805 1806 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1807 return; 1808 1809 /* 1810 * Remember the previous number of free descriptors. 
1811 */
1812 ofree = sc->sc_txfree;
1813
1814 /*
1815 * Loop through the send queue, setting up transmit descriptors
1816 * until we drain the queue, or use up all available transmit
1817 * descriptors.
1818 */
1819 for (;;) {
1820 /* Grab a packet off the queue. */
1821 IFQ_POLL(&ifp->if_snd, m0);
1822 if (m0 == NULL)
1823 break;
1824
1825 DPRINTF(WM_DEBUG_TX,
1826 ("%s: TX: have packet to transmit: %p\n",
1827 sc->sc_dev.dv_xname, m0));
1828
1829 /* Get a work queue entry. */
1830 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
1831 wm_txintr(sc);
1832 if (sc->sc_txsfree == 0) {
1833 DPRINTF(WM_DEBUG_TX,
1834 ("%s: TX: no free job descriptors\n",
1835 sc->sc_dev.dv_xname));
1836 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1837 break;
1838 }
1839 }
1840
1841 txs = &sc->sc_txsoft[sc->sc_txsnext];
1842 dmamap = txs->txs_dmamap;
1843
1844 use_tso = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1845
1846 /*
1847 * So says the Linux driver:
1848 * The controller does a simple calculation to make sure
1849 * there is enough room in the FIFO before initiating the
1850 * DMA for each buffer. The calc is:
1851 * 4 = ceil(buffer len / MSS)
1852 * To make sure we don't overrun the FIFO, adjust the max
1853 * buffer len if the MSS drops.
1854 */
1855 dmamap->dm_maxsegsz =
1856 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
1857 ? m0->m_pkthdr.segsz << 2
1858 : WTX_MAX_LEN;
1859
1860 /*
1861 * Load the DMA map. If this fails, the packet either
1862 * didn't fit in the allotted number of segments, or we
1863 * were short on resources. For the too-many-segments
1864 * case, we simply report an error and drop the packet,
1865 * since we can't sanely copy a jumbo packet to a single
1866 * buffer.
1867 */
1868 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1869 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1870 if (error) {
1871 if (error == EFBIG) {
1872 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1873 log(LOG_ERR, "%s: Tx packet consumes too many "
1874 "DMA segments, dropping...\n",
1875 sc->sc_dev.dv_xname);
1876 IFQ_DEQUEUE(&ifp->if_snd, m0);
1877 wm_dump_mbuf_chain(sc, m0);
1878 m_freem(m0);
1879 continue;
1880 }
1881 /*
1882 * Short on resources, just stop for now.
1883 */
1884 DPRINTF(WM_DEBUG_TX,
1885 ("%s: TX: dmamap load failed: %d\n",
1886 sc->sc_dev.dv_xname, error));
1887 break;
1888 }
1889
1890 segs_needed = dmamap->dm_nsegs;
1891 if (use_tso) {
1892 /* For sentinel descriptor; see below. */
1893 segs_needed++;
1894 }
1895
1896 /*
1897 * Ensure we have enough descriptors free to describe
1898 * the packet. Note, we always reserve one descriptor
1899 * at the end of the ring due to the semantics of the
1900 * TDT register, plus one more in the event we need
1901 * to load offload context.
1902 */
1903 if (segs_needed > sc->sc_txfree - 2) {
1904 /*
1905 * Not enough free descriptors to transmit this
1906 * packet. We haven't committed anything yet,
1907 * so just unload the DMA map, put the packet
1908 * back on the queue, and punt. Notify the upper
1909 * layer that there are no more slots left.
1910 */
1911 DPRINTF(WM_DEBUG_TX,
1912 ("%s: TX: need %d (%d) descriptors, have %d\n",
1913 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
1914 sc->sc_txfree - 1));
1915 ifp->if_flags |= IFF_OACTIVE;
1916 bus_dmamap_unload(sc->sc_dmat, dmamap);
1917 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1918 break;
1919 }
1920
1921 /*
1922 * Check for 82547 Tx FIFO bug. We need to do this
1923 * once we know we can transmit the packet, since we
1924 * do some internal FIFO space accounting here.
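 * A sketch of that accounting (it lives in wm_82547_txfifo_bugchk()
 * above): with its 16-byte FIFO header the frame occupies
 *	len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR)
 * bytes of FIFO space, and we stall (in half-duplex only) whenever
 *	len >= WM_82547_PAD_LEN + (sc_txfifo_size - sc_txfifo_head),
 * i.e. whenever the frame would land too close to the FIFO wrap point.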
1925 */ 1926 if (sc->sc_type == WM_T_82547 && 1927 wm_82547_txfifo_bugchk(sc, m0)) { 1928 DPRINTF(WM_DEBUG_TX, 1929 ("%s: TX: 82547 Tx FIFO bug detected\n", 1930 sc->sc_dev.dv_xname)); 1931 ifp->if_flags |= IFF_OACTIVE; 1932 bus_dmamap_unload(sc->sc_dmat, dmamap); 1933 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall); 1934 break; 1935 } 1936 1937 IFQ_DEQUEUE(&ifp->if_snd, m0); 1938 1939 /* 1940 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1941 */ 1942 1943 DPRINTF(WM_DEBUG_TX, 1944 ("%s: TX: packet has %d (%d) DMA segments\n", 1945 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed)); 1946 1947 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 1948 1949 /* 1950 * Store a pointer to the packet so that we can free it 1951 * later. 1952 * 1953 * Initially, we consider the number of descriptors the 1954 * packet uses the number of DMA segments. This may be 1955 * incremented by 1 if we do checksum offload (a descriptor 1956 * is used to set the checksum context). 1957 */ 1958 txs->txs_mbuf = m0; 1959 txs->txs_firstdesc = sc->sc_txnext; 1960 txs->txs_ndesc = segs_needed; 1961 1962 /* Set up offload parameters for this packet. */ 1963 if (m0->m_pkthdr.csum_flags & 1964 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 1965 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 1966 if (wm_tx_offload(sc, txs, &cksumcmd, 1967 &cksumfields) != 0) { 1968 /* Error message already displayed. */ 1969 bus_dmamap_unload(sc->sc_dmat, dmamap); 1970 continue; 1971 } 1972 } else { 1973 cksumcmd = 0; 1974 cksumfields = 0; 1975 } 1976 1977 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 1978 1979 /* Sync the DMA map. */ 1980 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 1981 BUS_DMASYNC_PREWRITE); 1982 1983 /* 1984 * Initialize the transmit descriptor. 1985 */ 1986 for (nexttx = sc->sc_txnext, seg = 0; 1987 seg < dmamap->dm_nsegs; seg++) { 1988 for (seglen = dmamap->dm_segs[seg].ds_len, 1989 curaddr = dmamap->dm_segs[seg].ds_addr; 1990 seglen != 0; 1991 curaddr += curlen, seglen -= curlen, 1992 nexttx = WM_NEXTTX(sc, nexttx)) { 1993 curlen = seglen; 1994 1995 /* 1996 * So says the Linux driver: 1997 * Work around for premature descriptor 1998 * write-backs in TSO mode. Append a 1999 * 4-byte sentinel descriptor. 2000 */ 2001 if (use_tso && 2002 seg == dmamap->dm_nsegs - 1 && 2003 curlen > 8) 2004 curlen -= 4; 2005 2006 wm_set_dma_addr( 2007 &sc->sc_txdescs[nexttx].wtx_addr, 2008 curaddr); 2009 sc->sc_txdescs[nexttx].wtx_cmdlen = 2010 htole32(cksumcmd | curlen); 2011 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2012 0; 2013 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2014 cksumfields; 2015 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2016 lasttx = nexttx; 2017 2018 DPRINTF(WM_DEBUG_TX, 2019 ("%s: TX: desc %d: low 0x%08lx, " 2020 "len 0x%04x\n", 2021 sc->sc_dev.dv_xname, nexttx, 2022 curaddr & 0xffffffffUL, (unsigned)curlen)); 2023 } 2024 } 2025 2026 KASSERT(lasttx != -1); 2027 2028 /* 2029 * Set up the command byte on the last descriptor of 2030 * the packet. If we're in the interrupt delay window, 2031 * delay the interrupt. 2032 */ 2033 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2034 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2035 2036 #if 0 /* XXXJRT */ 2037 /* 2038 * If VLANs are enabled and the packet has a VLAN tag, set 2039 * up the descriptor to encapsulate the packet for us. 2040 * 2041 * This is only valid on the last descriptor of the packet. 
2042 */ 2043 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2044 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2045 htole32(WTX_CMD_VLE); 2046 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2047 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2048 } 2049 #endif /* XXXJRT */ 2050 2051 txs->txs_lastdesc = lasttx; 2052 2053 DPRINTF(WM_DEBUG_TX, 2054 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname, 2055 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2056 2057 /* Sync the descriptors we're using. */ 2058 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2059 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2060 2061 /* Give the packet to the chip. */ 2062 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2063 2064 DPRINTF(WM_DEBUG_TX, 2065 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx)); 2066 2067 DPRINTF(WM_DEBUG_TX, 2068 ("%s: TX: finished transmitting packet, job %d\n", 2069 sc->sc_dev.dv_xname, sc->sc_txsnext)); 2070 2071 /* Advance the tx pointer. */ 2072 sc->sc_txfree -= txs->txs_ndesc; 2073 sc->sc_txnext = nexttx; 2074 2075 sc->sc_txsfree--; 2076 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2077 2078 #if NBPFILTER > 0 2079 /* Pass the packet to any BPF listeners. */ 2080 if (ifp->if_bpf) 2081 bpf_mtap(ifp->if_bpf, m0); 2082 #endif /* NBPFILTER > 0 */ 2083 } 2084 2085 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2086 /* No more slots; notify upper layer. */ 2087 ifp->if_flags |= IFF_OACTIVE; 2088 } 2089 2090 if (sc->sc_txfree != ofree) { 2091 /* Set a watchdog timer in case the chip flakes out. */ 2092 ifp->if_timer = 5; 2093 } 2094 } 2095 2096 /* 2097 * wm_watchdog: [ifnet interface function] 2098 * 2099 * Watchdog timer handler. 2100 */ 2101 static void 2102 wm_watchdog(struct ifnet *ifp) 2103 { 2104 struct wm_softc *sc = ifp->if_softc; 2105 2106 /* 2107 * Since we're using delayed interrupts, sweep up 2108 * before we report an error. 2109 */ 2110 wm_txintr(sc); 2111 2112 if (sc->sc_txfree != WM_NTXDESC(sc)) { 2113 log(LOG_ERR, 2114 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2115 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 2116 sc->sc_txnext); 2117 ifp->if_oerrors++; 2118 2119 /* Reset the interface. */ 2120 (void) wm_init(ifp); 2121 } 2122 2123 /* Try to get more packets going. */ 2124 wm_start(ifp); 2125 } 2126 2127 /* 2128 * wm_ioctl: [ifnet interface function] 2129 * 2130 * Handle control requests from the operator. 2131 */ 2132 static int 2133 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2134 { 2135 struct wm_softc *sc = ifp->if_softc; 2136 struct ifreq *ifr = (struct ifreq *) data; 2137 int s, error; 2138 2139 s = splnet(); 2140 2141 switch (cmd) { 2142 case SIOCSIFMEDIA: 2143 case SIOCGIFMEDIA: 2144 /* Flow control requires full-duplex mode. */ 2145 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 2146 (ifr->ifr_media & IFM_FDX) == 0) 2147 ifr->ifr_media &= ~IFM_ETH_FMASK; 2148 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 2149 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 2150 /* We can do both TXPAUSE and RXPAUSE. */ 2151 ifr->ifr_media |= 2152 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 2153 } 2154 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 2155 } 2156 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 2157 break; 2158 default: 2159 error = ether_ioctl(ifp, cmd, data); 2160 if (error == ENETRESET) { 2161 /* 2162 * Multicast list has changed; set the hardware filter 2163 * accordingly. 
2164 */ 2165 if (ifp->if_flags & IFF_RUNNING) 2166 wm_set_filter(sc); 2167 error = 0; 2168 } 2169 break; 2170 } 2171 2172 /* Try to get more packets going. */ 2173 wm_start(ifp); 2174 2175 splx(s); 2176 return (error); 2177 } 2178 2179 /* 2180 * wm_intr: 2181 * 2182 * Interrupt service routine. 2183 */ 2184 static int 2185 wm_intr(void *arg) 2186 { 2187 struct wm_softc *sc = arg; 2188 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2189 uint32_t icr; 2190 int handled = 0; 2191 2192 while (1 /* CONSTCOND */) { 2193 icr = CSR_READ(sc, WMREG_ICR); 2194 if ((icr & sc->sc_icr) == 0) 2195 break; 2196 2197 #if 0 /*NRND > 0*/ 2198 if (RND_ENABLED(&sc->rnd_source)) 2199 rnd_add_uint32(&sc->rnd_source, icr); 2200 #endif 2201 2202 handled = 1; 2203 2204 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2205 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 2206 DPRINTF(WM_DEBUG_RX, 2207 ("%s: RX: got Rx intr 0x%08x\n", 2208 sc->sc_dev.dv_xname, 2209 icr & (ICR_RXDMT0|ICR_RXT0))); 2210 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 2211 } 2212 #endif 2213 wm_rxintr(sc); 2214 2215 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2216 if (icr & ICR_TXDW) { 2217 DPRINTF(WM_DEBUG_TX, 2218 ("%s: TX: got TXDW interrupt\n", 2219 sc->sc_dev.dv_xname)); 2220 WM_EVCNT_INCR(&sc->sc_ev_txdw); 2221 } 2222 #endif 2223 wm_txintr(sc); 2224 2225 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 2226 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 2227 wm_linkintr(sc, icr); 2228 } 2229 2230 if (icr & ICR_RXO) { 2231 ifp->if_ierrors++; 2232 #if defined(WM_DEBUG) 2233 log(LOG_WARNING, "%s: Receive overrun\n", 2234 sc->sc_dev.dv_xname); 2235 #endif /* defined(WM_DEBUG) */ 2236 } 2237 } 2238 2239 if (handled) { 2240 /* Try to get more packets going. */ 2241 wm_start(ifp); 2242 } 2243 2244 return (handled); 2245 } 2246 2247 /* 2248 * wm_txintr: 2249 * 2250 * Helper; handle transmit interrupts. 2251 */ 2252 static void 2253 wm_txintr(struct wm_softc *sc) 2254 { 2255 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2256 struct wm_txsoft *txs; 2257 uint8_t status; 2258 int i; 2259 2260 ifp->if_flags &= ~IFF_OACTIVE; 2261 2262 /* 2263 * Go through the Tx list and free mbufs for those 2264 * frames which have been transmitted. 2265 */ 2266 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2267 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2268 txs = &sc->sc_txsoft[i]; 2269 2270 DPRINTF(WM_DEBUG_TX, 2271 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i)); 2272 2273 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2274 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2275 2276 status = 2277 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2278 if ((status & WTX_ST_DD) == 0) { 2279 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2280 BUS_DMASYNC_PREREAD); 2281 break; 2282 } 2283 2284 DPRINTF(WM_DEBUG_TX, 2285 ("%s: TX: job %d done: descs %d..%d\n", 2286 sc->sc_dev.dv_xname, i, txs->txs_firstdesc, 2287 txs->txs_lastdesc)); 2288 2289 /* 2290 * XXX We should probably be using the statistics 2291 * XXX registers, but I don't know if they exist 2292 * XXX on chips before the i82544. 
2293 */
2294
2295 #ifdef WM_EVENT_COUNTERS
2296 if (status & WTX_ST_TU)
2297 WM_EVCNT_INCR(&sc->sc_ev_tu);
2298 #endif /* WM_EVENT_COUNTERS */
2299
2300 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2301 ifp->if_oerrors++;
2302 if (status & WTX_ST_LC)
2303 log(LOG_WARNING, "%s: late collision\n",
2304 sc->sc_dev.dv_xname);
2305 else if (status & WTX_ST_EC) {
2306 ifp->if_collisions += 16;
2307 log(LOG_WARNING, "%s: excessive collisions\n",
2308 sc->sc_dev.dv_xname);
2309 }
2310 } else
2311 ifp->if_opackets++;
2312
2313 sc->sc_txfree += txs->txs_ndesc;
2314 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2315 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2316 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2317 m_freem(txs->txs_mbuf);
2318 txs->txs_mbuf = NULL;
2319 }
2320
2321 /* Update the dirty transmit buffer pointer. */
2322 sc->sc_txsdirty = i;
2323 DPRINTF(WM_DEBUG_TX,
2324 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
2325
2326 /*
2327 * If there are no more pending transmissions, cancel the watchdog
2328 * timer.
2329 */
2330 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2331 ifp->if_timer = 0;
2332 }
2333
2334 /*
2335 * wm_rxintr:
2336 *
2337 * Helper; handle receive interrupts.
2338 */
2339 static void
2340 wm_rxintr(struct wm_softc *sc)
2341 {
2342 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2343 struct wm_rxsoft *rxs;
2344 struct mbuf *m;
2345 int i, len;
2346 uint8_t status, errors;
2347
2348 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2349 rxs = &sc->sc_rxsoft[i];
2350
2351 DPRINTF(WM_DEBUG_RX,
2352 ("%s: RX: checking descriptor %d\n",
2353 sc->sc_dev.dv_xname, i));
2354
2355 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2356
2357 status = sc->sc_rxdescs[i].wrx_status;
2358 errors = sc->sc_rxdescs[i].wrx_errors;
2359 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2360
2361 if ((status & WRX_ST_DD) == 0) {
2362 /*
2363 * We have processed all of the receive descriptors.
2364 */
2365 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2366 break;
2367 }
2368
2369 if (__predict_false(sc->sc_rxdiscard)) {
2370 DPRINTF(WM_DEBUG_RX,
2371 ("%s: RX: discarding contents of descriptor %d\n",
2372 sc->sc_dev.dv_xname, i));
2373 WM_INIT_RXDESC(sc, i);
2374 if (status & WRX_ST_EOP) {
2375 /* Reset our state. */
2376 DPRINTF(WM_DEBUG_RX,
2377 ("%s: RX: resetting rxdiscard -> 0\n",
2378 sc->sc_dev.dv_xname));
2379 sc->sc_rxdiscard = 0;
2380 }
2381 continue;
2382 }
2383
2384 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2385 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2386
2387 m = rxs->rxs_mbuf;
2388
2389 /*
2390 * Add a new receive buffer to the ring, unless of
2391 * course the length is zero. Treat the latter as a
2392 * failed mapping.
2393 */
2394 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2395 /*
2396 * Failed, throw away what we've done so
2397 * far, and discard the rest of the packet.
2398 */
2399 ifp->if_ierrors++;
2400 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2401 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2402 WM_INIT_RXDESC(sc, i);
2403 if ((status & WRX_ST_EOP) == 0)
2404 sc->sc_rxdiscard = 1;
2405 if (sc->sc_rxhead != NULL)
2406 m_freem(sc->sc_rxhead);
2407 WM_RXCHAIN_RESET(sc);
2408 DPRINTF(WM_DEBUG_RX,
2409 ("%s: RX: Rx buffer allocation failed, "
2410 "dropping packet%s\n", sc->sc_dev.dv_xname,
2411 sc->sc_rxdiscard ?
" (discard)" : "");
2412 continue;
2413 }
2414
2415 WM_RXCHAIN_LINK(sc, m);
2416
2417 m->m_len = len;
2418
2419 DPRINTF(WM_DEBUG_RX,
2420 ("%s: RX: buffer at %p len %d\n",
2421 sc->sc_dev.dv_xname, m->m_data, len));
2422
2423 /*
2424 * If this is not the end of the packet, keep
2425 * looking.
2426 */
2427 if ((status & WRX_ST_EOP) == 0) {
2428 sc->sc_rxlen += len;
2429 DPRINTF(WM_DEBUG_RX,
2430 ("%s: RX: not yet EOP, rxlen -> %d\n",
2431 sc->sc_dev.dv_xname, sc->sc_rxlen));
2432 continue;
2433 }
2434
2435 /*
2436 * Okay, we have the entire packet now. The chip is
2437 * configured to include the FCS (not all chips can
2438 * be configured to strip it), so we need to trim it.
2439 */
2440 m->m_len -= ETHER_CRC_LEN;
2441
2442 *sc->sc_rxtailp = NULL;
2443 len = m->m_len + sc->sc_rxlen;
2444 m = sc->sc_rxhead;
2445
2446 WM_RXCHAIN_RESET(sc);
2447
2448 DPRINTF(WM_DEBUG_RX,
2449 ("%s: RX: have entire packet, len -> %d\n",
2450 sc->sc_dev.dv_xname, len));
2451
2452 /*
2453 * If an error occurred, update stats and drop the packet.
2454 */
2455 if (errors &
2456 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2457 ifp->if_ierrors++;
2458 if (errors & WRX_ER_SE)
2459 log(LOG_WARNING, "%s: symbol error\n",
2460 sc->sc_dev.dv_xname);
2461 else if (errors & WRX_ER_SEQ)
2462 log(LOG_WARNING, "%s: receive sequence error\n",
2463 sc->sc_dev.dv_xname);
2464 else if (errors & WRX_ER_CE)
2465 log(LOG_WARNING, "%s: CRC error\n",
2466 sc->sc_dev.dv_xname);
2467 m_freem(m);
2468 continue;
2469 }
2470
2471 /*
2472 * No errors. Receive the packet.
2473 */
2474 m->m_pkthdr.rcvif = ifp;
2475 m->m_pkthdr.len = len;
2476
2477 #if 0 /* XXXJRT */
2478 /*
2479 * If VLANs are enabled, VLAN packets have been unwrapped
2480 * for us. Associate the tag with the packet.
2481 */
2482 if ((status & WRX_ST_VP) != 0) {
2483 VLAN_INPUT_TAG(ifp, m,
2484 le16toh(sc->sc_rxdescs[i].wrx_special),
2485 continue);
2486 }
2487 #endif /* XXXJRT */
2488
2489 /*
2490 * Set up checksum info for this packet.
2491 */
2492 if ((status & WRX_ST_IXSM) == 0) {
2493 if (status & WRX_ST_IPCS) {
2494 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2495 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2496 if (errors & WRX_ER_IPE)
2497 m->m_pkthdr.csum_flags |=
2498 M_CSUM_IPv4_BAD;
2499 }
2500 if (status & WRX_ST_TCPCS) {
2501 /*
2502 * Note: we don't know if this was TCP or UDP,
2503 * so we just set both bits, and expect the
2504 * upper layers to deal.
2505 */
2506 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2507 m->m_pkthdr.csum_flags |=
2508 M_CSUM_TCPv4|M_CSUM_UDPv4;
2509 if (errors & WRX_ER_TCPE)
2510 m->m_pkthdr.csum_flags |=
2511 M_CSUM_TCP_UDP_BAD;
2512 }
2513 }
2514
2515 ifp->if_ipackets++;
2516
2517 #if NBPFILTER > 0
2518 /* Pass this up to any BPF listeners. */
2519 if (ifp->if_bpf)
2520 bpf_mtap(ifp->if_bpf, m);
2521 #endif /* NBPFILTER > 0 */
2522
2523 /* Pass it on. */
2524 (*ifp->if_input)(ifp, m);
2525 }
2526
2527 /* Update the receive pointer. */
2528 sc->sc_rxptr = i;
2529
2530 DPRINTF(WM_DEBUG_RX,
2531 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2532 }
2533
2534 /*
2535 * wm_linkintr:
2536 *
2537 * Helper; handle link interrupts.
2538 */
2539 static void
2540 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2541 {
2542 uint32_t status;
2543
2544 /*
2545 * If we get a link status interrupt on a 1000BASE-T
2546 * device, just fall into the normal MII tick path.
2547 */ 2548 if (sc->sc_flags & WM_F_HAS_MII) { 2549 if (icr & ICR_LSC) { 2550 DPRINTF(WM_DEBUG_LINK, 2551 ("%s: LINK: LSC -> mii_tick\n", 2552 sc->sc_dev.dv_xname)); 2553 mii_tick(&sc->sc_mii); 2554 } else if (icr & ICR_RXSEQ) { 2555 DPRINTF(WM_DEBUG_LINK, 2556 ("%s: LINK Receive sequence error\n", 2557 sc->sc_dev.dv_xname)); 2558 } 2559 return; 2560 } 2561 2562 /* 2563 * If we are now receiving /C/, check for link again in 2564 * a couple of link clock ticks. 2565 */ 2566 if (icr & ICR_RXCFG) { 2567 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 2568 sc->sc_dev.dv_xname)); 2569 sc->sc_tbi_anstate = 2; 2570 } 2571 2572 if (icr & ICR_LSC) { 2573 status = CSR_READ(sc, WMREG_STATUS); 2574 if (status & STATUS_LU) { 2575 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 2576 sc->sc_dev.dv_xname, 2577 (status & STATUS_FD) ? "FDX" : "HDX")); 2578 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 2579 sc->sc_fcrtl &= ~FCRTL_XONE; 2580 if (status & STATUS_FD) 2581 sc->sc_tctl |= 2582 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2583 else 2584 sc->sc_tctl |= 2585 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 2586 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 2587 sc->sc_fcrtl |= FCRTL_XONE; 2588 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2589 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 2590 WMREG_OLD_FCRTL : WMREG_FCRTL, 2591 sc->sc_fcrtl); 2592 sc->sc_tbi_linkup = 1; 2593 } else { 2594 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 2595 sc->sc_dev.dv_xname)); 2596 sc->sc_tbi_linkup = 0; 2597 } 2598 sc->sc_tbi_anstate = 2; 2599 wm_tbi_set_linkled(sc); 2600 } else if (icr & ICR_RXSEQ) { 2601 DPRINTF(WM_DEBUG_LINK, 2602 ("%s: LINK: Receive sequence error\n", 2603 sc->sc_dev.dv_xname)); 2604 } 2605 } 2606 2607 /* 2608 * wm_tick: 2609 * 2610 * One second timer, used to check link status, sweep up 2611 * completed transmit jobs, etc. 2612 */ 2613 static void 2614 wm_tick(void *arg) 2615 { 2616 struct wm_softc *sc = arg; 2617 int s; 2618 2619 s = splnet(); 2620 2621 if (sc->sc_type >= WM_T_82542_2_1) { 2622 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2623 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2624 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2625 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2626 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2627 } 2628 2629 if (sc->sc_flags & WM_F_HAS_MII) 2630 mii_tick(&sc->sc_mii); 2631 else 2632 wm_tbi_check_link(sc); 2633 2634 splx(s); 2635 2636 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2637 } 2638 2639 /* 2640 * wm_reset: 2641 * 2642 * Reset the i82542 chip. 2643 */ 2644 static void 2645 wm_reset(struct wm_softc *sc) 2646 { 2647 int i; 2648 2649 /* 2650 * Allocate on-chip memory according to the MTU size. 2651 * The Packet Buffer Allocation register must be written 2652 * before the chip is reset. 2653 */ 2654 switch (sc->sc_type) { 2655 case WM_T_82547: 2656 case WM_T_82547_2: 2657 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2658 PBA_22K : PBA_30K; 2659 sc->sc_txfifo_head = 0; 2660 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 2661 sc->sc_txfifo_size = 2662 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 2663 sc->sc_txfifo_stall = 0; 2664 break; 2665 case WM_T_82571: 2666 case WM_T_82572: 2667 sc->sc_pba = PBA_32K; 2668 break; 2669 case WM_T_82573: 2670 sc->sc_pba = PBA_12K; 2671 break; 2672 default: 2673 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 
PBA_40K : PBA_48K;
2675 break;
2676 }
2677 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2678
2679 switch (sc->sc_type) {
2680 case WM_T_82544:
2681 case WM_T_82540:
2682 case WM_T_82545:
2683 case WM_T_82546:
2684 case WM_T_82541:
2685 case WM_T_82541_2:
2686 /*
2687 * On some chipsets, a reset through a memory-mapped write
2688 * cycle can cause the chip to reset before completing the
2689 * write cycle. This causes major headaches that can be
2690 * avoided by issuing the reset via indirect register writes
2691 * through I/O space.
2692 *
2693 * So, if we successfully mapped the I/O BAR at attach time,
2694 * use that. Otherwise, try our luck with a memory-mapped
2695 * reset.
2696 */
2697 if (sc->sc_flags & WM_F_IOH_VALID)
2698 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2699 else
2700 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2701 break;
2702
2703 case WM_T_82545_3:
2704 case WM_T_82546_3:
2705 /* Use the shadow control register on these chips. */
2706 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2707 break;
2708
2709 default:
2710 /* Everything else can safely use the documented method. */
2711 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2712 break;
2713 }
2714 delay(10000);
2715
2716 for (i = 0; i < 1000; i++) {
2717 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2718 return;
2719 delay(20);
2720 }
2721
2722 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2723 log(LOG_ERR, "%s: reset failed to complete\n",
2724 sc->sc_dev.dv_xname);
2725 }
2726
2727 /*
2728 * wm_init: [ifnet interface function]
2729 *
2730 * Initialize the interface. Must be called at splnet().
2731 */
2732 static int
2733 wm_init(struct ifnet *ifp)
2734 {
2735 struct wm_softc *sc = ifp->if_softc;
2736 struct wm_rxsoft *rxs;
2737 int i, error = 0;
2738 uint32_t reg;
2739
2740 /*
2741 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2742 * There is a small but measurable benefit to avoiding the adjustment
2743 * of the descriptor so that the headers are aligned, for normal mtu,
2744 * on such platforms. One possibility is that the DMA itself is
2745 * slightly more efficient if the front of the entire packet (instead
2746 * of the front of the headers) is aligned.
2747 *
2748 * Note we must always set align_tweak to 0 if we are using
2749 * jumbo frames.
2750 */
2751 #ifdef __NO_STRICT_ALIGNMENT
2752 sc->sc_align_tweak = 0;
2753 #else
2754 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2755 sc->sc_align_tweak = 0;
2756 else
2757 sc->sc_align_tweak = 2;
2758 #endif /* __NO_STRICT_ALIGNMENT */
2759
2760 /* Cancel any pending I/O. */
2761 wm_stop(ifp, 0);
2762
2763 /* Reset the chip to a known state. */
2764 wm_reset(sc);
2765
2766 /* Initialize the transmit descriptor ring.
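 * The ring is producer/consumer: the chip fetches descriptors at its
 * head pointer while software publishes new work by advancing the
 * tail, which is why wm_start() finishes each packet by writing the
 * new tail index through sc_tdt_reg.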
*/ 2767 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 2768 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 2769 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2770 sc->sc_txfree = WM_NTXDESC(sc); 2771 sc->sc_txnext = 0; 2772 2773 if (sc->sc_type < WM_T_82543) { 2774 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0)); 2775 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0)); 2776 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 2777 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 2778 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 2779 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 2780 } else { 2781 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0)); 2782 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0)); 2783 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 2784 CSR_WRITE(sc, WMREG_TDH, 0); 2785 CSR_WRITE(sc, WMREG_TDT, 0); 2786 CSR_WRITE(sc, WMREG_TIDV, 64); 2787 CSR_WRITE(sc, WMREG_TADV, 128); 2788 2789 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 2790 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 2791 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 2792 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 2793 } 2794 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 2795 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 2796 2797 /* Initialize the transmit job descriptors. */ 2798 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 2799 sc->sc_txsoft[i].txs_mbuf = NULL; 2800 sc->sc_txsfree = WM_TXQUEUELEN(sc); 2801 sc->sc_txsnext = 0; 2802 sc->sc_txsdirty = 0; 2803 2804 /* 2805 * Initialize the receive descriptor and receive job 2806 * descriptor rings. 2807 */ 2808 if (sc->sc_type < WM_T_82543) { 2809 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 2810 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 2811 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 2812 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 2813 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 2814 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 2815 2816 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 2817 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 2818 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 2819 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 2820 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 2821 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 2822 } else { 2823 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 2824 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 2825 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 2826 CSR_WRITE(sc, WMREG_RDH, 0); 2827 CSR_WRITE(sc, WMREG_RDT, 0); 2828 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD); 2829 CSR_WRITE(sc, WMREG_RADV, 128); 2830 } 2831 for (i = 0; i < WM_NRXDESC; i++) { 2832 rxs = &sc->sc_rxsoft[i]; 2833 if (rxs->rxs_mbuf == NULL) { 2834 if ((error = wm_add_rxbuf(sc, i)) != 0) { 2835 log(LOG_ERR, "%s: unable to allocate or map rx " 2836 "buffer %d, error = %d\n", 2837 sc->sc_dev.dv_xname, i, error); 2838 /* 2839 * XXX Should attempt to run with fewer receive 2840 * XXX buffers instead of just failing. 2841 */ 2842 wm_rxdrain(sc); 2843 goto out; 2844 } 2845 } else 2846 WM_INIT_RXDESC(sc, i); 2847 } 2848 sc->sc_rxptr = 0; 2849 sc->sc_rxdiscard = 0; 2850 WM_RXCHAIN_RESET(sc); 2851 2852 /* 2853 * Clear out the VLAN table -- we don't use it (yet). 2854 */ 2855 CSR_WRITE(sc, WMREG_VET, 0); 2856 for (i = 0; i < WM_VLAN_TABSIZE; i++) 2857 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 2858 2859 /* 2860 * Set up flow-control parameters. 2861 * 2862 * XXX Values could probably stand some tuning. 
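 * FCAL/FCAH/FCT describe what an incoming 802.3x PAUSE frame looks
 * like (the reserved flow-control multicast address and the 0x8808
 * ethertype), FCRTL/FCRTH set the low/high receive FIFO water marks,
 * and FCTTV is the timer value placed in PAUSE frames we transmit.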
2863 */
2864 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2865 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2866 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2867
2868 sc->sc_fcrtl = FCRTL_DFLT;
2869 if (sc->sc_type < WM_T_82543) {
2870 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2871 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
2872 } else {
2873 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2874 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
2875 }
2876 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2877
2878 #if 0 /* XXXJRT */
2879 /* Deal with VLAN enables. */
2880 if (VLAN_ATTACHED(&sc->sc_ethercom))
2881 sc->sc_ctrl |= CTRL_VME;
2882 else
2883 #endif /* XXXJRT */
2884 sc->sc_ctrl &= ~CTRL_VME;
2885
2886 /* Write the control registers. */
2887 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2888 #if 0
2889 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2890 #endif
2891
2892 /*
2893 * Set up checksum offload parameters.
2894 */
2895 reg = CSR_READ(sc, WMREG_RXCSUM);
2896 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
2897 reg |= RXCSUM_IPOFL;
2898 else
2899 reg &= ~RXCSUM_IPOFL;
2900 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
2901 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2902 else {
2903 reg &= ~RXCSUM_TUOFL;
2904 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0)
2905 reg &= ~RXCSUM_IPOFL;
2906 }
2907 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2908
2909 /*
2910 * Set up the interrupt registers.
2911 */
2912 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2913 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2914 ICR_RXO | ICR_RXT0;
2915 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2916 sc->sc_icr |= ICR_RXCFG;
2917 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2918
2919 /* Set up the inter-packet gap. */
2920 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2921
2922 if (sc->sc_type >= WM_T_82543) {
2923 /* Set up the interrupt throttling register (units of 256ns) */
2924 sc->sc_itr = 1000000000 / (7000 * 256);
2925 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
2926 }
2927
2928 #if 0 /* XXXJRT */
2929 /* Set the VLAN ethertype. */
2930 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2931 #endif
2932
2933 /*
2934 * Set up the transmit control register; we start out with
2935 * a collision distance suitable for FDX, but update it when
2936 * we resolve the media type.
2937 */
2938 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2939 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2940 if (sc->sc_type >= WM_T_82571)
2941 sc->sc_tctl |= TCTL_MULR;
2942 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2943
2944 /* Set the media. */
2945 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2946
2947 /*
2948 * Set up the receive control register; we actually program
2949 * the register when we set the receive filter. Use multicast
2950 * address offset type 0.
2951 *
2952 * Only the i82544 has the ability to strip the incoming
2953 * CRC, so we don't enable that feature.
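 * (wm_rxintr() compensates by trimming ETHER_CRC_LEN from every
 * completed frame before handing it up the stack.)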
2954 */
2955 sc->sc_mchash_type = 0;
2956 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
2957 | RCTL_MO(sc->sc_mchash_type);
2958
2959 /* 82573 doesn't support jumbo frames */
2960 if (sc->sc_type != WM_T_82573)
2961 sc->sc_rctl |= RCTL_LPE;
2962
2963 if (MCLBYTES == 2048) {
2964 sc->sc_rctl |= RCTL_2k;
2965 } else {
2966 if (sc->sc_type >= WM_T_82543) {
2967 switch(MCLBYTES) {
2968 case 4096:
2969 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2970 break;
2971 case 8192:
2972 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2973 break;
2974 case 16384:
2975 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2976 break;
2977 default:
2978 panic("wm_init: MCLBYTES %d unsupported",
2979 MCLBYTES);
2980 break;
2981 }
2982 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2983 }
2984
2985 /* Set the receive filter. */
2986 wm_set_filter(sc);
2987
2988 /* Start the one second link check clock. */
2989 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2990
2991 /* ...all done! */
2992 ifp->if_flags |= IFF_RUNNING;
2993 ifp->if_flags &= ~IFF_OACTIVE;
2994
2995 out:
2996 if (error)
2997 log(LOG_ERR, "%s: interface not running\n",
2998 sc->sc_dev.dv_xname);
2999 return (error);
3000 }
3001
3002 /*
3003 * wm_rxdrain:
3004 *
3005 * Drain the receive queue.
3006 */
3007 static void
3008 wm_rxdrain(struct wm_softc *sc)
3009 {
3010 struct wm_rxsoft *rxs;
3011 int i;
3012
3013 for (i = 0; i < WM_NRXDESC; i++) {
3014 rxs = &sc->sc_rxsoft[i];
3015 if (rxs->rxs_mbuf != NULL) {
3016 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3017 m_freem(rxs->rxs_mbuf);
3018 rxs->rxs_mbuf = NULL;
3019 }
3020 }
3021 }
3022
3023 /*
3024 * wm_stop: [ifnet interface function]
3025 *
3026 * Stop transmission on the interface.
3027 */
3028 static void
3029 wm_stop(struct ifnet *ifp, int disable)
3030 {
3031 struct wm_softc *sc = ifp->if_softc;
3032 struct wm_txsoft *txs;
3033 int i;
3034
3035 /* Stop the one second clock. */
3036 callout_stop(&sc->sc_tick_ch);
3037
3038 /* Stop the 82547 Tx FIFO stall check timer. */
3039 if (sc->sc_type == WM_T_82547)
3040 callout_stop(&sc->sc_txfifo_ch);
3041
3042 if (sc->sc_flags & WM_F_HAS_MII) {
3043 /* Down the MII. */
3044 mii_down(&sc->sc_mii);
3045 }
3046
3047 /* Stop the transmit and receive processes. */
3048 CSR_WRITE(sc, WMREG_TCTL, 0);
3049 CSR_WRITE(sc, WMREG_RCTL, 0);
3050
3051 /*
3052 * Clear the interrupt mask to ensure the device cannot assert its
3053 * interrupt line.
3054 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3055 * any currently pending or shared interrupt.
3056 */
3057 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3058 sc->sc_icr = 0;
3059
3060 /* Release any queued transmit buffers. */
3061 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3062 txs = &sc->sc_txsoft[i];
3063 if (txs->txs_mbuf != NULL) {
3064 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3065 m_freem(txs->txs_mbuf);
3066 txs->txs_mbuf = NULL;
3067 }
3068 }
3069
3070 if (disable)
3071 wm_rxdrain(sc);
3072
3073 /* Mark the interface as down and cancel the watchdog timer. */
3074 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3075 ifp->if_timer = 0;
3076 }
3077
3078 /*
3079 * wm_acquire_eeprom:
3080 *
3081 * Perform the EEPROM handshake required on some chips.
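 * Two layers are involved: the software/firmware semaphore in SWSM
 * (SWSM_SWESMBI, taken by wm_get_eeprom_semaphore()) and, on parts
 * flagged WM_F_EEPROM_HANDSHAKE, an explicit request/grant cycle:
 * assert EECD_EE_REQ, then poll for EECD_EE_GNT before driving the
 * serial interface. wm_release_eeprom() undoes both in reverse order.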
3082 */
3083 static int
3084 wm_acquire_eeprom(struct wm_softc *sc)
3085 {
3086 uint32_t reg;
3087 int x;
3088
3089 /* Always succeeds when the NVM is flash-type; no handshake needed. */
3090 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3091 return 0;
3092
3093 if (wm_get_eeprom_semaphore(sc))
3094 return 1;
3095
3096 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3097 reg = CSR_READ(sc, WMREG_EECD);
3098
3099 /* Request EEPROM access. */
3100 reg |= EECD_EE_REQ;
3101 CSR_WRITE(sc, WMREG_EECD, reg);
3102
3103 /* ...and wait for it to be granted. */
3104 for (x = 0; x < 1000; x++) {
3105 reg = CSR_READ(sc, WMREG_EECD);
3106 if (reg & EECD_EE_GNT)
3107 break;
3108 delay(5);
3109 }
3110 if ((reg & EECD_EE_GNT) == 0) {
3111 aprint_error("%s: could not acquire EEPROM GNT\n",
3112 sc->sc_dev.dv_xname);
3113 reg &= ~EECD_EE_REQ;
3114 CSR_WRITE(sc, WMREG_EECD, reg);
3115 wm_put_eeprom_semaphore(sc);
3116 return (1);
3117 }
3118 }
3119
3120 return (0);
3121 }
3122
3123 /*
3124 * wm_release_eeprom:
3125 *
3126 * Release the EEPROM mutex.
3127 */
3128 static void
3129 wm_release_eeprom(struct wm_softc *sc)
3130 {
3131 uint32_t reg;
3132
3133 /* Nothing to release when the NVM is flash-type. */
3134 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3135 return;
3136
3137 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3138 reg = CSR_READ(sc, WMREG_EECD);
3139 reg &= ~EECD_EE_REQ;
3140 CSR_WRITE(sc, WMREG_EECD, reg);
3141 }
3142
3143 wm_put_eeprom_semaphore(sc);
3144 }
3145
3146 /*
3147 * wm_eeprom_sendbits:
3148 *
3149 * Send a series of bits to the EEPROM.
3150 */
3151 static void
3152 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3153 {
3154 uint32_t reg;
3155 int x;
3156
3157 reg = CSR_READ(sc, WMREG_EECD);
3158
3159 for (x = nbits; x > 0; x--) {
3160 if (bits & (1U << (x - 1)))
3161 reg |= EECD_DI;
3162 else
3163 reg &= ~EECD_DI;
3164 CSR_WRITE(sc, WMREG_EECD, reg);
3165 delay(2);
3166 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3167 delay(2);
3168 CSR_WRITE(sc, WMREG_EECD, reg);
3169 delay(2);
3170 }
3171 }
3172
3173 /*
3174 * wm_eeprom_recvbits:
3175 *
3176 * Receive a series of bits from the EEPROM.
3177 */
3178 static void
3179 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3180 {
3181 uint32_t reg, val;
3182 int x;
3183
3184 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3185
3186 val = 0;
3187 for (x = nbits; x > 0; x--) {
3188 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3189 delay(2);
3190 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3191 val |= (1U << (x - 1));
3192 CSR_WRITE(sc, WMREG_EECD, reg);
3193 delay(2);
3194 }
3195 *valp = val;
3196 }
3197
3198 /*
3199 * wm_read_eeprom_uwire:
3200 *
3201 * Read a word from the EEPROM using the MicroWire protocol.
3202 */
3203 static int
3204 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3205 {
3206 uint32_t reg, val;
3207 int i;
3208
3209 for (i = 0; i < wordcnt; i++) {
3210 /* Clear SK and DI. */
3211 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3212 CSR_WRITE(sc, WMREG_EECD, reg);
3213
3214 /* Set CHIP SELECT. */
3215 reg |= EECD_CS;
3216 CSR_WRITE(sc, WMREG_EECD, reg);
3217 delay(2);
3218
3219 /* Shift in the READ command. */
3220 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3221
3222 /* Shift in address. */
3223 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3224
3225 /* Shift out the data. */
3226 wm_eeprom_recvbits(sc, &val, 16);
3227 data[i] = val & 0xffff;
3228
3229 /* Clear CHIP SELECT.
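 * Dropping CS ends this read and re-arms the part for the next
 * command.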
*/
3230 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3231 CSR_WRITE(sc, WMREG_EECD, reg);
3232 delay(2);
3233 }
3234
3235 return (0);
3236 }
3237
3238 /*
3239 * wm_spi_eeprom_ready:
3240 *
3241 * Wait for a SPI EEPROM to be ready for commands.
3242 */
3243 static int
3244 wm_spi_eeprom_ready(struct wm_softc *sc)
3245 {
3246 uint32_t val;
3247 int usec;
3248
3249 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3250 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3251 wm_eeprom_recvbits(sc, &val, 8);
3252 if ((val & SPI_SR_RDY) == 0)
3253 break;
3254 }
3255 if (usec >= SPI_MAX_RETRIES) {
3256 aprint_error("%s: EEPROM failed to become ready\n",
3257 sc->sc_dev.dv_xname);
3258 return (1);
3259 }
3260 return (0);
3261 }
3262
3263 /*
3264 * wm_read_eeprom_spi:
3265 *
3266 * Read a word from the EEPROM using the SPI protocol.
3267 */
3268 static int
3269 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3270 {
3271 uint32_t reg, val;
3272 int i;
3273 uint8_t opc;
3274
3275 /* Clear SK and CS. */
3276 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3277 CSR_WRITE(sc, WMREG_EECD, reg);
3278 delay(2);
3279
3280 if (wm_spi_eeprom_ready(sc))
3281 return (1);
3282
3283 /* Toggle CS to flush commands. */
3284 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3285 delay(2);
3286 CSR_WRITE(sc, WMREG_EECD, reg);
3287 delay(2);
3288
3289 opc = SPI_OPC_READ;
3290 if (sc->sc_ee_addrbits == 8 && word >= 128)
3291 opc |= SPI_OPC_A8;
3292
3293 wm_eeprom_sendbits(sc, opc, 8);
3294 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3295
3296 for (i = 0; i < wordcnt; i++) {
3297 wm_eeprom_recvbits(sc, &val, 16);
3298 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3299 }
3300
3301 /* Raise CS and clear SK. */
3302 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3303 CSR_WRITE(sc, WMREG_EECD, reg);
3304 delay(2);
3305
3306 return (0);
3307 }
3308
3309 #define EEPROM_CHECKSUM 0xBABA
3310 #define EEPROM_SIZE 0x0040
3311
3312 /*
3313 * wm_validate_eeprom_checksum:
3314 *
3315 * The checksum is defined as the sum of the first 64 (16 bit) words,
3316 * which must equal EEPROM_CHECKSUM.
3317 */
3318 static int
3319 wm_validate_eeprom_checksum(struct wm_softc *sc)
3320 {
3321 uint16_t checksum;
3322 uint16_t eeprom_data;
3323 int i;
3324
3325 checksum = 0;
3326
3327 for (i = 0; i < EEPROM_SIZE; i++) {
3328 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3329 return 1;
3330 checksum += eeprom_data;
3331 }
3332
3333 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3334 return 1;
3335
3336 return 0;
3337 }
3338
3339 /*
3340 * wm_read_eeprom:
3341 *
3342 * Read data from the serial EEPROM.
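 * This is the front end the rest of the driver uses: it takes the
 * access lock, then dispatches on the NVM type (EERD register, SPI,
 * or MicroWire). A caller sketch, assuming the usual layout in which
 * the station address occupies words 0-2:
 *
 *	uint16_t myea[3];
 *
 *	if (wm_read_eeprom(sc, 0, 3, myea) == 0)
 *		... each 16-bit word holds two MAC address octets ...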
3342 */
3343 static int
3344 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3345 {
3346 int rv;
3347
3348 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3349 return 1;
3350
3351 if (wm_acquire_eeprom(sc))
3352 return 1;
3353
3354 if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3355 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3356 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3357 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3358 else
3359 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3360
3361 wm_release_eeprom(sc);
3362 return rv;
3363 }
3364
3365 static int
3366 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3367 uint16_t *data)
3368 {
3369 int i, eerd = 0;
3370 int error = 0;
3371
3372 for (i = 0; i < wordcnt; i++) {
3373 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3374
3375 CSR_WRITE(sc, WMREG_EERD, eerd);
3376 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3377 if (error != 0)
3378 break;
3379
3380 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3381 }
3382
3383 return error;
3384 }
3385
3386 static int
3387 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3388 {
3389 uint32_t attempts = 100000;
3390 uint32_t i, reg = 0;
3391 int32_t done = -1;
3392
3393 for (i = 0; i < attempts; i++) {
3394 reg = CSR_READ(sc, rw);
3395
3396 if (reg & EERD_DONE) {
3397 done = 0;
3398 break;
3399 }
3400 delay(5);
3401 }
3402
3403 return done;
3404 }
3405
3406 /*
3407 * wm_add_rxbuf:
3408 *
3409 * Add a receive buffer to the indicated descriptor.
3410 */
3411 static int
3412 wm_add_rxbuf(struct wm_softc *sc, int idx)
3413 {
3414 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3415 struct mbuf *m;
3416 int error;
3417
3418 MGETHDR(m, M_DONTWAIT, MT_DATA);
3419 if (m == NULL)
3420 return (ENOBUFS);
3421
3422 MCLGET(m, M_DONTWAIT);
3423 if ((m->m_flags & M_EXT) == 0) {
3424 m_freem(m);
3425 return (ENOBUFS);
3426 }
3427
3428 if (rxs->rxs_mbuf != NULL)
3429 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3430
3431 rxs->rxs_mbuf = m;
3432
3433 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3434 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3435 BUS_DMA_READ|BUS_DMA_NOWAIT);
3436 if (error) {
3437 /* XXX XXX XXX */
3438 printf("%s: unable to load rx DMA map %d, error = %d\n",
3439 sc->sc_dev.dv_xname, idx, error);
3440 panic("wm_add_rxbuf");
3441 }
3442
3443 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3444 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3445
3446 WM_INIT_RXDESC(sc, idx);
3447
3448 return (0);
3449 }
3450
3451 /*
3452 * wm_set_ral:
3453 *
3454 * Set an entry in the receive address list.
3455 */
3456 static void
3457 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3458 {
3459 uint32_t ral_lo, ral_hi;
3460
3461 if (enaddr != NULL) {
3462 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3463 (enaddr[3] << 24);
3464 ral_hi = enaddr[4] | (enaddr[5] << 8);
3465 ral_hi |= RAL_AV;
3466 } else {
3467 ral_lo = 0;
3468 ral_hi = 0;
3469 }
3470
3471 if (sc->sc_type >= WM_T_82544) {
3472 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3473 ral_lo);
3474 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3475 ral_hi);
3476 } else {
3477 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3478 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3479 }
3480 }
3481
3482 /*
3483 * wm_mchash:
3484 *
3485 * Compute the hash of the multicast address for the 4096-bit
3486 * multicast filter.
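 * For example, with sc_mchash_type 0 the hash of 01:00:5e:00:00:01
 * (the IPv4 all-hosts group) is (0x00 >> 4) | (0x01 << 4) = 0x010,
 * so wm_set_filter() sets bit 16 (0x010 & 0x1f) of multicast table
 * word 0 (0x010 >> 5).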
3487 */ 3488 static uint32_t 3489 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3490 { 3491 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3492 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3493 uint32_t hash; 3494 3495 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3496 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3497 3498 return (hash & 0xfff); 3499 } 3500 3501 /* 3502 * wm_set_filter: 3503 * 3504 * Set up the receive filter. 3505 */ 3506 static void 3507 wm_set_filter(struct wm_softc *sc) 3508 { 3509 struct ethercom *ec = &sc->sc_ethercom; 3510 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3511 struct ether_multi *enm; 3512 struct ether_multistep step; 3513 bus_addr_t mta_reg; 3514 uint32_t hash, reg, bit; 3515 int i; 3516 3517 if (sc->sc_type >= WM_T_82544) 3518 mta_reg = WMREG_CORDOVA_MTA; 3519 else 3520 mta_reg = WMREG_MTA; 3521 3522 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3523 3524 if (ifp->if_flags & IFF_BROADCAST) 3525 sc->sc_rctl |= RCTL_BAM; 3526 if (ifp->if_flags & IFF_PROMISC) { 3527 sc->sc_rctl |= RCTL_UPE; 3528 goto allmulti; 3529 } 3530 3531 /* 3532 * Set the station address in the first RAL slot, and 3533 * clear the remaining slots. 3534 */ 3535 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0); 3536 for (i = 1; i < WM_RAL_TABSIZE; i++) 3537 wm_set_ral(sc, NULL, i); 3538 3539 /* Clear out the multicast table. */ 3540 for (i = 0; i < WM_MC_TABSIZE; i++) 3541 CSR_WRITE(sc, mta_reg + (i << 2), 0); 3542 3543 ETHER_FIRST_MULTI(step, ec, enm); 3544 while (enm != NULL) { 3545 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3546 /* 3547 * We must listen to a range of multicast addresses. 3548 * For now, just accept all multicasts, rather than 3549 * trying to set only those filter bits needed to match 3550 * the range. (At this time, the only use of address 3551 * ranges is for IP multicast routing, for which the 3552 * range is big enough to require all bits set.) 3553 */ 3554 goto allmulti; 3555 } 3556 3557 hash = wm_mchash(sc, enm->enm_addrlo); 3558 3559 reg = (hash >> 5) & 0x7f; 3560 bit = hash & 0x1f; 3561 3562 hash = CSR_READ(sc, mta_reg + (reg << 2)); 3563 hash |= 1U << bit; 3564 3565 /* XXX Hardware bug?? */ 3566 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { 3567 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 3568 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3569 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 3570 } else 3571 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3572 3573 ETHER_NEXT_MULTI(step, enm); 3574 } 3575 3576 ifp->if_flags &= ~IFF_ALLMULTI; 3577 goto setit; 3578 3579 allmulti: 3580 ifp->if_flags |= IFF_ALLMULTI; 3581 sc->sc_rctl |= RCTL_MPE; 3582 3583 setit: 3584 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 3585 } 3586 3587 /* 3588 * wm_tbi_mediainit: 3589 * 3590 * Initialize media for use on 1000BASE-X devices. 
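 * (TBI, the ten-bit interface, covers fiber/SERDES attachments with
 * no copper PHY; link state comes from the MAC's own STATUS and RXCW
 * bits rather than from MII.)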
3591 */ 3592 static void 3593 wm_tbi_mediainit(struct wm_softc *sc) 3594 { 3595 const char *sep = ""; 3596 3597 if (sc->sc_type < WM_T_82543) 3598 sc->sc_tipg = TIPG_WM_DFLT; 3599 else 3600 sc->sc_tipg = TIPG_LG_DFLT; 3601 3602 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange, 3603 wm_tbi_mediastatus); 3604 3605 /* 3606 * SWD Pins: 3607 * 3608 * 0 = Link LED (output) 3609 * 1 = Loss Of Signal (input) 3610 */ 3611 sc->sc_ctrl |= CTRL_SWDPIO(0); 3612 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 3613 3614 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3615 3616 #define ADD(ss, mm, dd) \ 3617 do { \ 3618 aprint_normal("%s%s", sep, ss); \ 3619 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \ 3620 sep = ", "; \ 3621 } while (/*CONSTCOND*/0) 3622 3623 aprint_normal("%s: ", sc->sc_dev.dv_xname); 3624 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 3625 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD); 3626 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD); 3627 aprint_normal("\n"); 3628 3629 #undef ADD 3630 3631 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 3632 } 3633 3634 /* 3635 * wm_tbi_mediastatus: [ifmedia interface function] 3636 * 3637 * Get the current interface media status on a 1000BASE-X device. 3638 */ 3639 static void 3640 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 3641 { 3642 struct wm_softc *sc = ifp->if_softc; 3643 uint32_t ctrl; 3644 3645 ifmr->ifm_status = IFM_AVALID; 3646 ifmr->ifm_active = IFM_ETHER; 3647 3648 if (sc->sc_tbi_linkup == 0) { 3649 ifmr->ifm_active |= IFM_NONE; 3650 return; 3651 } 3652 3653 ifmr->ifm_status |= IFM_ACTIVE; 3654 ifmr->ifm_active |= IFM_1000_SX; 3655 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 3656 ifmr->ifm_active |= IFM_FDX; 3657 ctrl = CSR_READ(sc, WMREG_CTRL); 3658 if (ctrl & CTRL_RFCE) 3659 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 3660 if (ctrl & CTRL_TFCE) 3661 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 3662 } 3663 3664 /* 3665 * wm_tbi_mediachange: [ifmedia interface function] 3666 * 3667 * Set hardware to newly-selected media on a 1000BASE-X device. 3668 */ 3669 static int 3670 wm_tbi_mediachange(struct ifnet *ifp) 3671 { 3672 struct wm_softc *sc = ifp->if_softc; 3673 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 3674 uint32_t status; 3675 int i; 3676 3677 sc->sc_txcw = ife->ifm_data; 3678 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO || 3679 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 3680 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM; 3681 sc->sc_txcw |= TXCW_ANE; 3682 3683 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 3684 delay(10000); 3685 3686 /* NOTE: CTRL will update TFCE and RFCE automatically. */ 3687 3688 sc->sc_tbi_anstate = 0; 3689 3690 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) { 3691 /* Have signal; wait for the link to come up. */ 3692 for (i = 0; i < 50; i++) { 3693 delay(10000); 3694 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 3695 break; 3696 } 3697 3698 status = CSR_READ(sc, WMREG_STATUS); 3699 if (status & STATUS_LU) { 3700 /* Link is up. */ 3701 DPRINTF(WM_DEBUG_LINK, 3702 ("%s: LINK: set media -> link up %s\n", 3703 sc->sc_dev.dv_xname, 3704 (status & STATUS_FD) ? 
"FDX" : "HDX")); 3705 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3706 sc->sc_fcrtl &= ~FCRTL_XONE; 3707 if (status & STATUS_FD) 3708 sc->sc_tctl |= 3709 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3710 else 3711 sc->sc_tctl |= 3712 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3713 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 3714 sc->sc_fcrtl |= FCRTL_XONE; 3715 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3716 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3717 WMREG_OLD_FCRTL : WMREG_FCRTL, 3718 sc->sc_fcrtl); 3719 sc->sc_tbi_linkup = 1; 3720 } else { 3721 /* Link is down. */ 3722 DPRINTF(WM_DEBUG_LINK, 3723 ("%s: LINK: set media -> link down\n", 3724 sc->sc_dev.dv_xname)); 3725 sc->sc_tbi_linkup = 0; 3726 } 3727 } else { 3728 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 3729 sc->sc_dev.dv_xname)); 3730 sc->sc_tbi_linkup = 0; 3731 } 3732 3733 wm_tbi_set_linkled(sc); 3734 3735 return (0); 3736 } 3737 3738 /* 3739 * wm_tbi_set_linkled: 3740 * 3741 * Update the link LED on 1000BASE-X devices. 3742 */ 3743 static void 3744 wm_tbi_set_linkled(struct wm_softc *sc) 3745 { 3746 3747 if (sc->sc_tbi_linkup) 3748 sc->sc_ctrl |= CTRL_SWDPIN(0); 3749 else 3750 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 3751 3752 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3753 } 3754 3755 /* 3756 * wm_tbi_check_link: 3757 * 3758 * Check the link on 1000BASE-X devices. 3759 */ 3760 static void 3761 wm_tbi_check_link(struct wm_softc *sc) 3762 { 3763 uint32_t rxcw, ctrl, status; 3764 3765 if (sc->sc_tbi_anstate == 0) 3766 return; 3767 else if (sc->sc_tbi_anstate > 1) { 3768 DPRINTF(WM_DEBUG_LINK, 3769 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname, 3770 sc->sc_tbi_anstate)); 3771 sc->sc_tbi_anstate--; 3772 return; 3773 } 3774 3775 sc->sc_tbi_anstate = 0; 3776 3777 rxcw = CSR_READ(sc, WMREG_RXCW); 3778 ctrl = CSR_READ(sc, WMREG_CTRL); 3779 status = CSR_READ(sc, WMREG_STATUS); 3780 3781 if ((status & STATUS_LU) == 0) { 3782 DPRINTF(WM_DEBUG_LINK, 3783 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname)); 3784 sc->sc_tbi_linkup = 0; 3785 } else { 3786 DPRINTF(WM_DEBUG_LINK, 3787 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname, 3788 (status & STATUS_FD) ? "FDX" : "HDX")); 3789 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3790 sc->sc_fcrtl &= ~FCRTL_XONE; 3791 if (status & STATUS_FD) 3792 sc->sc_tctl |= 3793 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3794 else 3795 sc->sc_tctl |= 3796 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3797 if (ctrl & CTRL_TFCE) 3798 sc->sc_fcrtl |= FCRTL_XONE; 3799 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3800 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3801 WMREG_OLD_FCRTL : WMREG_FCRTL, 3802 sc->sc_fcrtl); 3803 sc->sc_tbi_linkup = 1; 3804 } 3805 3806 wm_tbi_set_linkled(sc); 3807 } 3808 3809 /* 3810 * wm_gmii_reset: 3811 * 3812 * Reset the PHY. 3813 */ 3814 static void 3815 wm_gmii_reset(struct wm_softc *sc) 3816 { 3817 uint32_t reg; 3818 3819 if (sc->sc_type >= WM_T_82544) { 3820 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 3821 delay(20000); 3822 3823 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3824 delay(20000); 3825 } else { 3826 /* The PHY reset pin is active-low. 
*/ 3827 reg = CSR_READ(sc, WMREG_CTRL_EXT); 3828 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 3829 CTRL_EXT_SWDPIN(4)); 3830 reg |= CTRL_EXT_SWDPIO(4); 3831 3832 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 3833 delay(10); 3834 3835 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 3836 delay(10); 3837 3838 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 3839 delay(10); 3840 #if 0 3841 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 3842 #endif 3843 } 3844 } 3845 3846 /* 3847 * wm_gmii_mediainit: 3848 * 3849 * Initialize media for use on 1000BASE-T devices. 3850 */ 3851 static void 3852 wm_gmii_mediainit(struct wm_softc *sc) 3853 { 3854 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3855 3856 /* We have MII. */ 3857 sc->sc_flags |= WM_F_HAS_MII; 3858 3859 sc->sc_tipg = TIPG_1000T_DFLT; 3860 3861 /* 3862 * Let the chip set speed/duplex on its own based on 3863 * signals from the PHY. 3864 */ 3865 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE; 3866 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3867 3868 /* Initialize our media structures and probe the GMII. */ 3869 sc->sc_mii.mii_ifp = ifp; 3870 3871 if (sc->sc_type >= WM_T_82544) { 3872 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg; 3873 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg; 3874 } else { 3875 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg; 3876 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg; 3877 } 3878 sc->sc_mii.mii_statchg = wm_gmii_statchg; 3879 3880 wm_gmii_reset(sc); 3881 3882 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange, 3883 wm_gmii_mediastatus); 3884 3885 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 3886 MII_OFFSET_ANY, MIIF_DOPAUSE); 3887 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 3888 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 3889 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 3890 } else 3891 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 3892 } 3893 3894 /* 3895 * wm_gmii_mediastatus: [ifmedia interface function] 3896 * 3897 * Get the current interface media status on a 1000BASE-T device. 3898 */ 3899 static void 3900 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 3901 { 3902 struct wm_softc *sc = ifp->if_softc; 3903 3904 mii_pollstat(&sc->sc_mii); 3905 ifmr->ifm_status = sc->sc_mii.mii_media_status; 3906 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) | 3907 sc->sc_flowflags; 3908 } 3909 3910 /* 3911 * wm_gmii_mediachange: [ifmedia interface function] 3912 * 3913 * Set hardware to newly-selected media on a 1000BASE-T device. 
3914 */ 3915 static int 3916 wm_gmii_mediachange(struct ifnet *ifp) 3917 { 3918 struct wm_softc *sc = ifp->if_softc; 3919 3920 if (ifp->if_flags & IFF_UP) 3921 mii_mediachg(&sc->sc_mii); 3922 return (0); 3923 } 3924 3925 #define MDI_IO CTRL_SWDPIN(2) 3926 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 3927 #define MDI_CLK CTRL_SWDPIN(3) 3928 3929 static void 3930 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 3931 { 3932 uint32_t i, v; 3933 3934 v = CSR_READ(sc, WMREG_CTRL); 3935 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 3936 v |= MDI_DIR | CTRL_SWDPIO(3); 3937 3938 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 3939 if (data & i) 3940 v |= MDI_IO; 3941 else 3942 v &= ~MDI_IO; 3943 CSR_WRITE(sc, WMREG_CTRL, v); 3944 delay(10); 3945 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3946 delay(10); 3947 CSR_WRITE(sc, WMREG_CTRL, v); 3948 delay(10); 3949 } 3950 } 3951 3952 static uint32_t 3953 i82543_mii_recvbits(struct wm_softc *sc) 3954 { 3955 uint32_t v, i, data = 0; 3956 3957 v = CSR_READ(sc, WMREG_CTRL); 3958 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 3959 v |= CTRL_SWDPIO(3); 3960 3961 CSR_WRITE(sc, WMREG_CTRL, v); 3962 delay(10); 3963 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3964 delay(10); 3965 CSR_WRITE(sc, WMREG_CTRL, v); 3966 delay(10); 3967 3968 for (i = 0; i < 16; i++) { 3969 data <<= 1; 3970 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3971 delay(10); 3972 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 3973 data |= 1; 3974 CSR_WRITE(sc, WMREG_CTRL, v); 3975 delay(10); 3976 } 3977 3978 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 3979 delay(10); 3980 CSR_WRITE(sc, WMREG_CTRL, v); 3981 delay(10); 3982 3983 return (data); 3984 } 3985 3986 #undef MDI_IO 3987 #undef MDI_DIR 3988 #undef MDI_CLK 3989 3990 /* 3991 * wm_gmii_i82543_readreg: [mii interface function] 3992 * 3993 * Read a PHY register on the GMII (i82543 version). 3994 */ 3995 static int 3996 wm_gmii_i82543_readreg(struct device *self, int phy, int reg) 3997 { 3998 struct wm_softc *sc = (void *) self; 3999 int rv; 4000 4001 i82543_mii_sendbits(sc, 0xffffffffU, 32); 4002 i82543_mii_sendbits(sc, reg | (phy << 5) | 4003 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 4004 rv = i82543_mii_recvbits(sc) & 0xffff; 4005 4006 DPRINTF(WM_DEBUG_GMII, 4007 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 4008 sc->sc_dev.dv_xname, phy, reg, rv)); 4009 4010 return (rv); 4011 } 4012 4013 /* 4014 * wm_gmii_i82543_writereg: [mii interface function] 4015 * 4016 * Write a PHY register on the GMII (i82543 version). 4017 */ 4018 static void 4019 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val) 4020 { 4021 struct wm_softc *sc = (void *) self; 4022 4023 i82543_mii_sendbits(sc, 0xffffffffU, 32); 4024 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 4025 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 4026 (MII_COMMAND_START << 30), 32); 4027 } 4028 4029 /* 4030 * wm_gmii_i82544_readreg: [mii interface function] 4031 * 4032 * Read a PHY register on the GMII. 
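 * On the i82544 and later, the i82543's bit-banged MDIO is replaced
 * by the MDIC register: write one op/PHY/register word, poll for
 * MDIC_READY, then take the data from the low 16 bits. A caller
 * sketch (MII_BMSR is from <dev/mii/mii.h>; PHY address 1 is only an
 * example):
 *
 *	int bmsr = wm_gmii_i82544_readreg(&sc->sc_dev, 1, MII_BMSR);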

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
}
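
/*
 * Hedged sketch (not part of the driver): wm_gmii_i82544_readreg() and
 * wm_gmii_i82544_writereg() above open-code the same MDIC_READY poll,
 * 100 iterations at 10us for roughly 1ms total.  Factored out it would
 * look like this; the helper name is hypothetical.
 */
#if 0
static uint32_t
mdic_wait_example(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	/* Poll until the management interface reports completion. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}
	return (mdic);	/* caller checks MDIC_READY and MDIC_E */
}
#endif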

static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 & 16 */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, device is Flash type */
		if (eecd == 0x03) {
			return 0;
		}
	}
	return 1;
}

static int
wm_get_eeprom_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	if ((sc->sc_flags & WM_F_EEPROM_SEMAPHORE) == 0)
		return 0;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit, we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		/* Release semaphores */
		wm_put_eeprom_semaphore(sc);
		return 1;
	}

	return 0;
}

static void
wm_put_eeprom_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	if ((sc->sc_flags & WM_F_EEPROM_SEMAPHORE) == 0)
		return;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}
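
/*
 * Hedged usage sketch (illustrative, not from the driver): the SWSM
 * semaphore above is a set-and-read-back lock shared with firmware.
 * Software sets SWSM_SWESMBI and reads it back; if the bit stuck, the
 * host owns the EEPROM interface until it clears the bit again.  A
 * caller would bracket NVM access like this (function name hypothetical):
 */
#if 0
static int
eeprom_access_example(struct wm_softc *sc)
{

	if (wm_get_eeprom_semaphore(sc))
		return 1;	/* timed out; firmware holds the semaphore */

	/* ... access the EEPROM via WMREG_EECD here ... */

	wm_put_eeprom_semaphore(sc);
	return 0;
}
#endif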