/*	$NetBSD: if_wm.c,v 1.131 2006/11/23 19:42:59 yamt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.131 2006/11/23 19:42:59 yamt Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.
 * We allocate them in a single clump that maps to a single DMA segment
 * to make several things easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
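	 * (sc_control_data points at the single DMA-safe clump described
	 * above; the sc_txdescs/sc_rxdescs macros below provide handy
	 * access to the two descriptor rings inside it.)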
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547.
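	 * They carry the state for the Tx FIFO stall work-around: in
	 * half-duplex mode the 82547 can hang if a packet wraps the end
	 * of its internal Tx FIFO, so we shadow the FIFO head ourselves
	 * and stall transmission until the FIFO drains (see
	 * wm_82547_txfifo_bugchk() and wm_82547_txfifo_stall() below).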
	 */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	struct callout sc_txfifo_ch;	/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
705 "Intel PRO/1000 QT (82571EB)", 706 WM_T_82571, WMP_F_1000T }, 707 708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 709 "Intel i82572EI 1000baseT Ethernet", 710 WM_T_82572, WMP_F_1000T }, 711 712 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 713 "Intel i82572EI 1000baseX Ethernet", 714 WM_T_82572, WMP_F_1000X }, 715 #if 0 716 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 717 "Intel i82572EI Gigabit Ethernet (SERDES)", 718 WM_T_82572, WMP_F_SERDES }, 719 #endif 720 721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 722 "Intel i82572EI 1000baseT Ethernet", 723 WM_T_82572, WMP_F_1000T }, 724 725 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 726 "Intel i82573E", 727 WM_T_82573, WMP_F_1000T }, 728 729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 730 "Intel i82573E IAMT", 731 WM_T_82573, WMP_F_1000T }, 732 733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 734 "Intel i82573L Gigabit Ethernet", 735 WM_T_82573, WMP_F_1000T }, 736 737 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 738 "i80003 dual 1000baseT Ethernet", 739 WM_T_80003, WMP_F_1000T }, 740 741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 742 "i80003 dual 1000baseX Ethernet", 743 WM_T_80003, WMP_F_1000T }, 744 #if 0 745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, 746 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", 747 WM_T_80003, WMP_F_SERDES }, 748 #endif 749 750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, 751 "Intel i80003 1000baseT Ethernet", 752 WM_T_80003, WMP_F_1000T }, 753 #if 0 754 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, 755 "Intel i80003 Gigabit Ethernet (SERDES)", 756 WM_T_80003, WMP_F_SERDES }, 757 #endif 758 759 { 0, 0, 760 NULL, 761 0, 0 }, 762 }; 763 764 #ifdef WM_EVENT_COUNTERS 765 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; 766 #endif /* WM_EVENT_COUNTERS */ 767 768 #if 0 /* Not currently used */ 769 static inline uint32_t 770 wm_io_read(struct wm_softc *sc, int reg) 771 { 772 773 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 774 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); 775 } 776 #endif 777 778 static inline void 779 wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 780 { 781 782 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 783 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 784 } 785 786 static inline void 787 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 788 { 789 wa->wa_low = htole32(v & 0xffffffffU); 790 if (sizeof(bus_addr_t) == 8) 791 wa->wa_high = htole32((uint64_t) v >> 32); 792 else 793 wa->wa_high = 0; 794 } 795 796 static const struct wm_product * 797 wm_lookup(const struct pci_attach_args *pa) 798 { 799 const struct wm_product *wmp; 800 801 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 802 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 803 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 804 return (wmp); 805 } 806 return (NULL); 807 } 808 809 static int 810 wm_match(struct device *parent, struct cfdata *cf, void *aux) 811 { 812 struct pci_attach_args *pa = aux; 813 814 if (wm_lookup(pa) != NULL) 815 return (1); 816 817 return (0); 818 } 819 820 static void 821 wm_attach(struct device *parent, struct device *self, void *aux) 822 { 823 struct wm_softc *sc = (void *) self; 824 struct pci_attach_args *pa = aux; 825 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 826 pci_chipset_tag_t pc = pa->pa_pc; 827 pci_intr_handle_t ih; 828 size_t cdata_size; 829 const char *intrstr = NULL; 830 const char *eetype; 831 bus_space_tag_t memt; 832 bus_space_handle_t memh; 
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE | WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX)
			    ? 120 : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL, 0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
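	 * Depending on the chip, the EEPROM may be MicroWire or SPI, or
	 * an on-board NVM FLASH (82573); it may need a handshake or a
	 * semaphore before access, or be reachable only indirectly via
	 * the EERD/EEWR registers; the address width varies as well.
	 * The flag and sc_ee_addrbits assignments below record all this.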
	 */
	if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if it was not
	 * already found in the device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
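	 * As with the MAC address above, a device property (e.g.
	 * "i82543-cfg1") takes precedence over the EEPROM contents.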
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
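	 * TBI (ten-bit interface) is the serializer interface of the
	 * 1000BASE-X (fiber) parts; copper parts instead use GMII to an
	 * external PHY.  Pre-82543 chips are always TBI; on later chips
	 * the STATUS_TBIMODE bit tells us which mode the chip is using.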
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shut down during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    wm_powerhook, sc);
	if (sc->sc_powerhook == NULL)
		aprint_error("%s: can't establish powerhook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
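	 * (Each fail_N label releases everything that had been set up by
	 * the time the corresponding "goto fail_N" above was taken.)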
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

static void
wm_powerhook(int why, void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;

	switch (why) {
	case PWR_SOFTSUSPEND:
		wm_shutdown(sc);
		break;
	case PWR_SOFTRESUME:
		ifp->if_flags &= ~IFF_RUNNING;
		wm_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			wm_start(ifp);
		break;
	case PWR_SUSPEND:
		pci_conf_capture(pc, tag, &sc->sc_pciconf);
		break;
	case PWR_RESUME:
		pci_conf_restore(pc, tag, &sc->sc_pciconf);
		break;
	}

	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		boolean_t v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
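			 *
			 * We pull the headers out of the chain with
			 * m_copydata(), zero the IP length field and seed
			 * the TCP checksum field with the pseudo-header
			 * sum (length omitted, as the TSO hardware
			 * expects), then push the patched fields back
			 * with m_copyback().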
1652 */ 1653 struct tcphdr th; 1654 1655 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 1656 1657 m_copydata(m0, hlen, sizeof(th), &th); 1658 if (v4) { 1659 struct ip ip; 1660 1661 m_copydata(m0, offset, sizeof(ip), &ip); 1662 ip.ip_len = 0; 1663 m_copyback(m0, 1664 offset + offsetof(struct ip, ip_len), 1665 sizeof(ip.ip_len), &ip.ip_len); 1666 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 1667 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 1668 } else { 1669 struct ip6_hdr ip6; 1670 1671 m_copydata(m0, offset, sizeof(ip6), &ip6); 1672 ip6.ip6_plen = 0; 1673 m_copyback(m0, 1674 offset + offsetof(struct ip6_hdr, ip6_plen), 1675 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 1676 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 1677 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 1678 } 1679 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 1680 sizeof(th.th_sum), &th.th_sum); 1681 1682 hlen += th.th_off << 2; 1683 } else { 1684 /* 1685 * TCP/IP headers are in the first mbuf; we can do 1686 * this the easy way. 1687 */ 1688 struct tcphdr *th; 1689 1690 if (v4) { 1691 struct ip *ip = 1692 (void *)(mtod(m0, caddr_t) + offset); 1693 th = (void *)(mtod(m0, caddr_t) + hlen); 1694 1695 ip->ip_len = 0; 1696 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1697 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1698 } else { 1699 struct ip6_hdr *ip6 = 1700 (void *)(mtod(m0, char *) + offset); 1701 th = (void *)(mtod(m0, char *) + hlen); 1702 1703 ip6->ip6_plen = 0; 1704 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1705 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1706 } 1707 hlen += th->th_off << 2; 1708 } 1709 1710 if (v4) { 1711 WM_EVCNT_INCR(&sc->sc_ev_txtso); 1712 cmdlen |= WTX_TCPIP_CMD_IP; 1713 } else { 1714 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 1715 ipcse = 0; 1716 } 1717 cmd |= WTX_TCPIP_CMD_TSE; 1718 cmdlen |= WTX_TCPIP_CMD_TSE | 1719 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1720 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1721 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1722 } 1723 1724 /* 1725 * NOTE: Even if we're not using the IP or TCP/UDP checksum 1726 * offload feature, if we load the context descriptor, we 1727 * MUST provide valid values for IPCSS and TUCSS fields. 1728 */ 1729 1730 ipcs = WTX_TCPIP_IPCSS(offset) | 1731 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1732 WTX_TCPIP_IPCSE(ipcse); 1733 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1734 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1735 fields |= WTX_IXSM; 1736 } 1737 1738 offset += iphl; 1739 1740 if (m0->m_pkthdr.csum_flags & 1741 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1742 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1743 fields |= WTX_TXSM; 1744 tucs = WTX_TCPIP_TUCSS(offset) | 1745 WTX_TCPIP_TUCSO(offset + 1746 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1747 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1748 } else if ((m0->m_pkthdr.csum_flags & 1749 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 1750 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1751 fields |= WTX_TXSM; 1752 tucs = WTX_TCPIP_TUCSS(offset) | 1753 WTX_TCPIP_TUCSO(offset + 1754 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1755 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1756 } else { 1757 /* Just initialize it to a valid TCP context. */ 1758 tucs = WTX_TCPIP_TUCSS(offset) | 1759 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1760 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1761 } 1762 1763 /* Fill in the context descriptor. 
 */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return (0);
}

static void
wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
{
	struct mbuf *m;
	int i;

	log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
		    "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
		    m->m_data, m->m_len, m->m_flags);
	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
	    i, i == 1 ? "" : "s");
}

/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}

/*
 * wm_82547_txfifo_bugchk:
 *
 *	Check for bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
 *
 *	We do this by checking the amount of space before the end
 *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
 *	the Tx FIFO, wait for all remaining packets to drain, reset
 *	the internal FIFO pointers to the beginning, and restart
 *	transmission on the interface.
 */
#define	WM_FIFO_HDR		0x10
#define	WM_82547_PAD_LEN	0x3e0
static int
wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
{
	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);

	/* Just return if already stalled. */
	if (sc->sc_txfifo_stall)
		return (1);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Stall only occurs in half-duplex mode.
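		 * Presumably that is because only a half-duplex link
		 * sees collisions, which force the chip to replay a
		 * frame out of the FIFO; it is that replay that fails
		 * across the wrap boundary.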
*/ 1870 goto send_packet; 1871 } 1872 1873 if (len >= WM_82547_PAD_LEN + space) { 1874 sc->sc_txfifo_stall = 1; 1875 callout_schedule(&sc->sc_txfifo_ch, 1); 1876 return (1); 1877 } 1878 1879 send_packet: 1880 sc->sc_txfifo_head += len; 1881 if (sc->sc_txfifo_head >= sc->sc_txfifo_size) 1882 sc->sc_txfifo_head -= sc->sc_txfifo_size; 1883 1884 return (0); 1885 } 1886 1887 /* 1888 * wm_start: [ifnet interface function] 1889 * 1890 * Start packet transmission on the interface. 1891 */ 1892 static void 1893 wm_start(struct ifnet *ifp) 1894 { 1895 struct wm_softc *sc = ifp->if_softc; 1896 struct mbuf *m0; 1897 #if 0 /* XXXJRT */ 1898 struct m_tag *mtag; 1899 #endif 1900 struct wm_txsoft *txs; 1901 bus_dmamap_t dmamap; 1902 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; 1903 bus_addr_t curaddr; 1904 bus_size_t seglen, curlen; 1905 uint32_t cksumcmd; 1906 uint8_t cksumfields; 1907 1908 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1909 return; 1910 1911 /* 1912 * Remember the previous number of free descriptors. 1913 */ 1914 ofree = sc->sc_txfree; 1915 1916 /* 1917 * Loop through the send queue, setting up transmit descriptors 1918 * until we drain the queue, or use up all available transmit 1919 * descriptors. 1920 */ 1921 for (;;) { 1922 /* Grab a packet off the queue. */ 1923 IFQ_POLL(&ifp->if_snd, m0); 1924 if (m0 == NULL) 1925 break; 1926 1927 DPRINTF(WM_DEBUG_TX, 1928 ("%s: TX: have packet to transmit: %p\n", 1929 sc->sc_dev.dv_xname, m0)); 1930 1931 /* Get a work queue entry. */ 1932 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) { 1933 wm_txintr(sc); 1934 if (sc->sc_txsfree == 0) { 1935 DPRINTF(WM_DEBUG_TX, 1936 ("%s: TX: no free job descriptors\n", 1937 sc->sc_dev.dv_xname)); 1938 WM_EVCNT_INCR(&sc->sc_ev_txsstall); 1939 break; 1940 } 1941 } 1942 1943 txs = &sc->sc_txsoft[sc->sc_txsnext]; 1944 dmamap = txs->txs_dmamap; 1945 1946 use_tso = (m0->m_pkthdr.csum_flags & 1947 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0; 1948 1949 /* 1950 * So says the Linux driver: 1951 * The controller does a simple calculation to make sure 1952 * there is enough room in the FIFO before initiating the 1953 * DMA for each buffer. The calc is: 1954 * 4 = ceil(buffer len / MSS) 1955 * To make sure we don't overrun the FIFO, adjust the max 1956 * buffer len if the MSS drops. 1957 */ 1958 dmamap->dm_maxsegsz = 1959 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN) 1960 ? m0->m_pkthdr.segsz << 2 1961 : WTX_MAX_LEN; 1962 1963 /* 1964 * Load the DMA map. If this fails, the packet either 1965 * didn't fit in the allotted number of segments, or we 1966 * were short on resources. For the too-many-segments 1967 * case, we simply report an error and drop the packet, 1968 * since we can't sanely copy a jumbo packet to a single 1969 * buffer. 1970 */ 1971 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 1972 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1973 if (error) { 1974 if (error == EFBIG) { 1975 WM_EVCNT_INCR(&sc->sc_ev_txdrop); 1976 log(LOG_ERR, "%s: Tx packet consumes too many " 1977 "DMA segments, dropping...\n", 1978 sc->sc_dev.dv_xname); 1979 IFQ_DEQUEUE(&ifp->if_snd, m0); 1980 wm_dump_mbuf_chain(sc, m0); 1981 m_freem(m0); 1982 continue; 1983 } 1984 /* 1985 * Short on resources, just stop for now. 1986 */ 1987 DPRINTF(WM_DEBUG_TX, 1988 ("%s: TX: dmamap load failed: %d\n", 1989 sc->sc_dev.dv_xname, error)); 1990 break; 1991 } 1992 1993 segs_needed = dmamap->dm_nsegs; 1994 if (use_tso) { 1995 /* For sentinel descriptor; see below. 
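			 *
			 * As a worked example of the descriptor budget:
			 * a TSO packet mapping to 8 DMA segments needs
			 * 8 + 1 (sentinel) = 9 data descriptors, and the
			 * "sc_txfree - 2" test below holds back one more
			 * slot for the TDT semantics and one for a
			 * possible context descriptor.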
 */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    sc->sc_dev.dv_xname));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
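				 *
				 * Trimming curlen by 4 leaves seglen == 4
				 * after this pass, so the inner loop runs
				 * once more and emits a trailing 4-byte
				 * descriptor -- that is the sentinel, and
				 * is why segs_needed was bumped by one in
				 * the TSO case above.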
2104 */ 2105 if (use_tso && 2106 seg == dmamap->dm_nsegs - 1 && 2107 curlen > 8) 2108 curlen -= 4; 2109 2110 wm_set_dma_addr( 2111 &sc->sc_txdescs[nexttx].wtx_addr, 2112 curaddr); 2113 sc->sc_txdescs[nexttx].wtx_cmdlen = 2114 htole32(cksumcmd | curlen); 2115 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2116 0; 2117 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2118 cksumfields; 2119 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2120 lasttx = nexttx; 2121 2122 DPRINTF(WM_DEBUG_TX, 2123 ("%s: TX: desc %d: low 0x%08lx, " 2124 "len 0x%04x\n", 2125 sc->sc_dev.dv_xname, nexttx, 2126 curaddr & 0xffffffffUL, (unsigned)curlen)); 2127 } 2128 } 2129 2130 KASSERT(lasttx != -1); 2131 2132 /* 2133 * Set up the command byte on the last descriptor of 2134 * the packet. If we're in the interrupt delay window, 2135 * delay the interrupt. 2136 */ 2137 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2138 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2139 2140 #if 0 /* XXXJRT */ 2141 /* 2142 * If VLANs are enabled and the packet has a VLAN tag, set 2143 * up the descriptor to encapsulate the packet for us. 2144 * 2145 * This is only valid on the last descriptor of the packet. 2146 */ 2147 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2148 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2149 htole32(WTX_CMD_VLE); 2150 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2151 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2152 } 2153 #endif /* XXXJRT */ 2154 2155 txs->txs_lastdesc = lasttx; 2156 2157 DPRINTF(WM_DEBUG_TX, 2158 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname, 2159 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2160 2161 /* Sync the descriptors we're using. */ 2162 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2163 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2164 2165 /* Give the packet to the chip. */ 2166 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2167 2168 DPRINTF(WM_DEBUG_TX, 2169 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx)); 2170 2171 DPRINTF(WM_DEBUG_TX, 2172 ("%s: TX: finished transmitting packet, job %d\n", 2173 sc->sc_dev.dv_xname, sc->sc_txsnext)); 2174 2175 /* Advance the tx pointer. */ 2176 sc->sc_txfree -= txs->txs_ndesc; 2177 sc->sc_txnext = nexttx; 2178 2179 sc->sc_txsfree--; 2180 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2181 2182 #if NBPFILTER > 0 2183 /* Pass the packet to any BPF listeners. */ 2184 if (ifp->if_bpf) 2185 bpf_mtap(ifp->if_bpf, m0); 2186 #endif /* NBPFILTER > 0 */ 2187 } 2188 2189 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2190 /* No more slots; notify upper layer. */ 2191 ifp->if_flags |= IFF_OACTIVE; 2192 } 2193 2194 if (sc->sc_txfree != ofree) { 2195 /* Set a watchdog timer in case the chip flakes out. */ 2196 ifp->if_timer = 5; 2197 } 2198 } 2199 2200 /* 2201 * wm_watchdog: [ifnet interface function] 2202 * 2203 * Watchdog timer handler. 2204 */ 2205 static void 2206 wm_watchdog(struct ifnet *ifp) 2207 { 2208 struct wm_softc *sc = ifp->if_softc; 2209 2210 /* 2211 * Since we're using delayed interrupts, sweep up 2212 * before we report an error. 2213 */ 2214 wm_txintr(sc); 2215 2216 if (sc->sc_txfree != WM_NTXDESC(sc)) { 2217 log(LOG_ERR, 2218 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2219 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 2220 sc->sc_txnext); 2221 ifp->if_oerrors++; 2222 2223 /* Reset the interface. */ 2224 (void) wm_init(ifp); 2225 } 2226 2227 /* Try to get more packets going. 
*/ 2228 wm_start(ifp); 2229 } 2230 2231 /* 2232 * wm_ioctl: [ifnet interface function] 2233 * 2234 * Handle control requests from the operator. 2235 */ 2236 static int 2237 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2238 { 2239 struct wm_softc *sc = ifp->if_softc; 2240 struct ifreq *ifr = (struct ifreq *) data; 2241 int s, error; 2242 2243 s = splnet(); 2244 2245 switch (cmd) { 2246 case SIOCSIFMEDIA: 2247 case SIOCGIFMEDIA: 2248 /* Flow control requires full-duplex mode. */ 2249 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 2250 (ifr->ifr_media & IFM_FDX) == 0) 2251 ifr->ifr_media &= ~IFM_ETH_FMASK; 2252 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 2253 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 2254 /* We can do both TXPAUSE and RXPAUSE. */ 2255 ifr->ifr_media |= 2256 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 2257 } 2258 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 2259 } 2260 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 2261 break; 2262 default: 2263 error = ether_ioctl(ifp, cmd, data); 2264 if (error == ENETRESET) { 2265 /* 2266 * Multicast list has changed; set the hardware filter 2267 * accordingly. 2268 */ 2269 if (ifp->if_flags & IFF_RUNNING) 2270 wm_set_filter(sc); 2271 error = 0; 2272 } 2273 break; 2274 } 2275 2276 /* Try to get more packets going. */ 2277 wm_start(ifp); 2278 2279 splx(s); 2280 return (error); 2281 } 2282 2283 /* 2284 * wm_intr: 2285 * 2286 * Interrupt service routine. 2287 */ 2288 static int 2289 wm_intr(void *arg) 2290 { 2291 struct wm_softc *sc = arg; 2292 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2293 uint32_t icr; 2294 int handled = 0; 2295 2296 while (1 /* CONSTCOND */) { 2297 icr = CSR_READ(sc, WMREG_ICR); 2298 if ((icr & sc->sc_icr) == 0) 2299 break; 2300 #if 0 /*NRND > 0*/ 2301 if (RND_ENABLED(&sc->rnd_source)) 2302 rnd_add_uint32(&sc->rnd_source, icr); 2303 #endif 2304 2305 handled = 1; 2306 2307 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2308 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 2309 DPRINTF(WM_DEBUG_RX, 2310 ("%s: RX: got Rx intr 0x%08x\n", 2311 sc->sc_dev.dv_xname, 2312 icr & (ICR_RXDMT0|ICR_RXT0))); 2313 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 2314 } 2315 #endif 2316 wm_rxintr(sc); 2317 2318 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2319 if (icr & ICR_TXDW) { 2320 DPRINTF(WM_DEBUG_TX, 2321 ("%s: TX: got TXDW interrupt\n", 2322 sc->sc_dev.dv_xname)); 2323 WM_EVCNT_INCR(&sc->sc_ev_txdw); 2324 } 2325 #endif 2326 wm_txintr(sc); 2327 2328 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 2329 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 2330 wm_linkintr(sc, icr); 2331 } 2332 2333 if (icr & ICR_RXO) { 2334 ifp->if_ierrors++; 2335 #if defined(WM_DEBUG) 2336 log(LOG_WARNING, "%s: Receive overrun\n", 2337 sc->sc_dev.dv_xname); 2338 #endif /* defined(WM_DEBUG) */ 2339 } 2340 } 2341 2342 if (handled) { 2343 /* Try to get more packets going. */ 2344 wm_start(ifp); 2345 } 2346 2347 return (handled); 2348 } 2349 2350 /* 2351 * wm_txintr: 2352 * 2353 * Helper; handle transmit interrupts. 2354 */ 2355 static void 2356 wm_txintr(struct wm_softc *sc) 2357 { 2358 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2359 struct wm_txsoft *txs; 2360 uint8_t status; 2361 int i; 2362 2363 ifp->if_flags &= ~IFF_OACTIVE; 2364 2365 /* 2366 * Go through the Tx list and free mbufs for those 2367 * frames which have been transmitted. 
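	 * Only each job's last descriptor is examined: WTX_CMD_RS
	 * (report status) is set only on the final descriptor of a
	 * packet, so that is the only place the chip writes the
	 * WTX_ST_DD bit back.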
2368 */ 2369 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2370 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2371 txs = &sc->sc_txsoft[i]; 2372 2373 DPRINTF(WM_DEBUG_TX, 2374 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i)); 2375 2376 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2377 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2378 2379 status = 2380 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2381 if ((status & WTX_ST_DD) == 0) { 2382 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2383 BUS_DMASYNC_PREREAD); 2384 break; 2385 } 2386 2387 DPRINTF(WM_DEBUG_TX, 2388 ("%s: TX: job %d done: descs %d..%d\n", 2389 sc->sc_dev.dv_xname, i, txs->txs_firstdesc, 2390 txs->txs_lastdesc)); 2391 2392 /* 2393 * XXX We should probably be using the statistics 2394 * XXX registers, but I don't know if they exist 2395 * XXX on chips before the i82544. 2396 */ 2397 2398 #ifdef WM_EVENT_COUNTERS 2399 if (status & WTX_ST_TU) 2400 WM_EVCNT_INCR(&sc->sc_ev_tu); 2401 #endif /* WM_EVENT_COUNTERS */ 2402 2403 if (status & (WTX_ST_EC|WTX_ST_LC)) { 2404 ifp->if_oerrors++; 2405 if (status & WTX_ST_LC) 2406 log(LOG_WARNING, "%s: late collision\n", 2407 sc->sc_dev.dv_xname); 2408 else if (status & WTX_ST_EC) { 2409 ifp->if_collisions += 16; 2410 log(LOG_WARNING, "%s: excessive collisions\n", 2411 sc->sc_dev.dv_xname); 2412 } 2413 } else 2414 ifp->if_opackets++; 2415 2416 sc->sc_txfree += txs->txs_ndesc; 2417 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 2418 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2419 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2420 m_freem(txs->txs_mbuf); 2421 txs->txs_mbuf = NULL; 2422 } 2423 2424 /* Update the dirty transmit buffer pointer. */ 2425 sc->sc_txsdirty = i; 2426 DPRINTF(WM_DEBUG_TX, 2427 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i)); 2428 2429 /* 2430 * If there are no more pending transmissions, cancel the watchdog 2431 * timer. 2432 */ 2433 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 2434 ifp->if_timer = 0; 2435 } 2436 2437 /* 2438 * wm_rxintr: 2439 * 2440 * Helper; handle receive interrupts. 2441 */ 2442 static void 2443 wm_rxintr(struct wm_softc *sc) 2444 { 2445 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2446 struct wm_rxsoft *rxs; 2447 struct mbuf *m; 2448 int i, len; 2449 uint8_t status, errors; 2450 2451 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 2452 rxs = &sc->sc_rxsoft[i]; 2453 2454 DPRINTF(WM_DEBUG_RX, 2455 ("%s: RX: checking descriptor %d\n", 2456 sc->sc_dev.dv_xname, i)); 2457 2458 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2459 2460 status = sc->sc_rxdescs[i].wrx_status; 2461 errors = sc->sc_rxdescs[i].wrx_errors; 2462 len = le16toh(sc->sc_rxdescs[i].wrx_len); 2463 2464 if ((status & WRX_ST_DD) == 0) { 2465 /* 2466 * We have processed all of the receive descriptors. 2467 */ 2468 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 2469 break; 2470 } 2471 2472 if (__predict_false(sc->sc_rxdiscard)) { 2473 DPRINTF(WM_DEBUG_RX, 2474 ("%s: RX: discarding contents of descriptor %d\n", 2475 sc->sc_dev.dv_xname, i)); 2476 WM_INIT_RXDESC(sc, i); 2477 if (status & WRX_ST_EOP) { 2478 /* Reset our state. */ 2479 DPRINTF(WM_DEBUG_RX, 2480 ("%s: RX: resetting rxdiscard -> 0\n", 2481 sc->sc_dev.dv_xname)); 2482 sc->sc_rxdiscard = 0; 2483 } 2484 continue; 2485 } 2486 2487 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2488 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2489 2490 m = rxs->rxs_mbuf; 2491 2492 /* 2493 * Add a new receive buffer to the ring, unless of 2494 * course the length is zero. 
Treat the latter as a failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 */
		m->m_len -= ETHER_CRC_LEN;

		*sc->sc_rxtailp = NULL;
		len = m->m_len + sc->sc_rxlen;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(sc->sc_rxdescs[i].wrx_special),
			    continue);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners.
*/ 2623 if (ifp->if_bpf) 2624 bpf_mtap(ifp->if_bpf, m); 2625 #endif /* NBPFILTER > 0 */ 2626 2627 /* Pass it on. */ 2628 (*ifp->if_input)(ifp, m); 2629 } 2630 2631 /* Update the receive pointer. */ 2632 sc->sc_rxptr = i; 2633 2634 DPRINTF(WM_DEBUG_RX, 2635 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i)); 2636 } 2637 2638 /* 2639 * wm_linkintr: 2640 * 2641 * Helper; handle link interrupts. 2642 */ 2643 static void 2644 wm_linkintr(struct wm_softc *sc, uint32_t icr) 2645 { 2646 uint32_t status; 2647 2648 /* 2649 * If we get a link status interrupt on a 1000BASE-T 2650 * device, just fall into the normal MII tick path. 2651 */ 2652 if (sc->sc_flags & WM_F_HAS_MII) { 2653 if (icr & ICR_LSC) { 2654 DPRINTF(WM_DEBUG_LINK, 2655 ("%s: LINK: LSC -> mii_tick\n", 2656 sc->sc_dev.dv_xname)); 2657 mii_tick(&sc->sc_mii); 2658 } else if (icr & ICR_RXSEQ) { 2659 DPRINTF(WM_DEBUG_LINK, 2660 ("%s: LINK Receive sequence error\n", 2661 sc->sc_dev.dv_xname)); 2662 } 2663 return; 2664 } 2665 2666 /* 2667 * If we are now receiving /C/, check for link again in 2668 * a couple of link clock ticks. 2669 */ 2670 if (icr & ICR_RXCFG) { 2671 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 2672 sc->sc_dev.dv_xname)); 2673 sc->sc_tbi_anstate = 2; 2674 } 2675 2676 if (icr & ICR_LSC) { 2677 status = CSR_READ(sc, WMREG_STATUS); 2678 if (status & STATUS_LU) { 2679 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 2680 sc->sc_dev.dv_xname, 2681 (status & STATUS_FD) ? "FDX" : "HDX")); 2682 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 2683 sc->sc_fcrtl &= ~FCRTL_XONE; 2684 if (status & STATUS_FD) 2685 sc->sc_tctl |= 2686 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2687 else 2688 sc->sc_tctl |= 2689 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 2690 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 2691 sc->sc_fcrtl |= FCRTL_XONE; 2692 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2693 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 2694 WMREG_OLD_FCRTL : WMREG_FCRTL, 2695 sc->sc_fcrtl); 2696 sc->sc_tbi_linkup = 1; 2697 } else { 2698 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 2699 sc->sc_dev.dv_xname)); 2700 sc->sc_tbi_linkup = 0; 2701 } 2702 sc->sc_tbi_anstate = 2; 2703 wm_tbi_set_linkled(sc); 2704 } else if (icr & ICR_RXSEQ) { 2705 DPRINTF(WM_DEBUG_LINK, 2706 ("%s: LINK: Receive sequence error\n", 2707 sc->sc_dev.dv_xname)); 2708 } 2709 } 2710 2711 /* 2712 * wm_tick: 2713 * 2714 * One second timer, used to check link status, sweep up 2715 * completed transmit jobs, etc. 2716 */ 2717 static void 2718 wm_tick(void *arg) 2719 { 2720 struct wm_softc *sc = arg; 2721 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2722 int s; 2723 2724 s = splnet(); 2725 2726 if (sc->sc_type >= WM_T_82542_2_1) { 2727 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2728 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2729 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2730 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2731 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2732 } 2733 2734 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 2735 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 2736 2737 2738 if (sc->sc_flags & WM_F_HAS_MII) 2739 mii_tick(&sc->sc_mii); 2740 else 2741 wm_tbi_check_link(sc); 2742 2743 splx(s); 2744 2745 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2746 } 2747 2748 /* 2749 * wm_reset: 2750 * 2751 * Reset the i82542 chip. 2752 */ 2753 static void 2754 wm_reset(struct wm_softc *sc) 2755 { 2756 int i; 2757 2758 /* 2759 * Allocate on-chip memory according to the MTU size. 
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	switch (sc->sc_type) {
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82541:
	case WM_T_82541_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;

	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;

	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	}
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		log(LOG_ERR, "%s: reset failed to complete\n",
		    sc->sc_dev.dv_xname);

	if (sc->sc_type == WM_T_80003) {
		/* wait for eeprom to reload */
		for (i = 1000; i > 0; i--) {
			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
				break;
		}
		if (i == 0) {
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", sc->sc_dev.dv_xname);
		}
	}
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
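	 *
	 * As an example of the non-jumbo case: the 14-byte Ethernet
	 * header leaves the IP header 2 bytes short of a 4-byte
	 * boundary, so starting the buffer at a 2-byte offset
	 * (align_tweak = 2) lands the protocol headers aligned on
	 * strict-alignment platforms.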
2872 */ 2873 #ifdef __NO_STRICT_ALIGNMENT 2874 sc->sc_align_tweak = 0; 2875 #else 2876 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 2877 sc->sc_align_tweak = 0; 2878 else 2879 sc->sc_align_tweak = 2; 2880 #endif /* __NO_STRICT_ALIGNMENT */ 2881 2882 /* Cancel any pending I/O. */ 2883 wm_stop(ifp, 0); 2884 2885 /* update statistics before reset */ 2886 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 2887 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 2888 2889 /* Reset the chip to a known state. */ 2890 wm_reset(sc); 2891 2892 /* Initialize the transmit descriptor ring. */ 2893 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 2894 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 2895 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2896 sc->sc_txfree = WM_NTXDESC(sc); 2897 sc->sc_txnext = 0; 2898 2899 if (sc->sc_type < WM_T_82543) { 2900 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0)); 2901 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0)); 2902 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 2903 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 2904 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 2905 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 2906 } else { 2907 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0)); 2908 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0)); 2909 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 2910 CSR_WRITE(sc, WMREG_TDH, 0); 2911 CSR_WRITE(sc, WMREG_TDT, 0); 2912 CSR_WRITE(sc, WMREG_TIDV, 64); 2913 CSR_WRITE(sc, WMREG_TADV, 128); 2914 2915 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 2916 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 2917 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 2918 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 2919 } 2920 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 2921 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 2922 2923 /* Initialize the transmit job descriptors. */ 2924 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 2925 sc->sc_txsoft[i].txs_mbuf = NULL; 2926 sc->sc_txsfree = WM_TXQUEUELEN(sc); 2927 sc->sc_txsnext = 0; 2928 sc->sc_txsdirty = 0; 2929 2930 /* 2931 * Initialize the receive descriptor and receive job 2932 * descriptor rings. 2933 */ 2934 if (sc->sc_type < WM_T_82543) { 2935 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 2936 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 2937 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 2938 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 2939 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 2940 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 2941 2942 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 2943 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 2944 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 2945 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 2946 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 2947 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 2948 } else { 2949 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 2950 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 2951 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 2952 CSR_WRITE(sc, WMREG_RDH, 0); 2953 CSR_WRITE(sc, WMREG_RDT, 0); 2954 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD); 2955 CSR_WRITE(sc, WMREG_RADV, 128); 2956 } 2957 for (i = 0; i < WM_NRXDESC; i++) { 2958 rxs = &sc->sc_rxsoft[i]; 2959 if (rxs->rxs_mbuf == NULL) { 2960 if ((error = wm_add_rxbuf(sc, i)) != 0) { 2961 log(LOG_ERR, "%s: unable to allocate or map rx " 2962 "buffer %d, error = %d\n", 2963 sc->sc_dev.dv_xname, i, error); 2964 /* 2965 * XXX Should attempt to run with fewer receive 2966 * XXX buffers instead of just failing. 
2967 */ 2968 wm_rxdrain(sc); 2969 goto out; 2970 } 2971 } else 2972 WM_INIT_RXDESC(sc, i); 2973 } 2974 sc->sc_rxptr = 0; 2975 sc->sc_rxdiscard = 0; 2976 WM_RXCHAIN_RESET(sc); 2977 2978 /* 2979 * Clear out the VLAN table -- we don't use it (yet). 2980 */ 2981 CSR_WRITE(sc, WMREG_VET, 0); 2982 for (i = 0; i < WM_VLAN_TABSIZE; i++) 2983 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 2984 2985 /* 2986 * Set up flow-control parameters. 2987 * 2988 * XXX Values could probably stand some tuning. 2989 */ 2990 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 2991 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 2992 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 2993 2994 sc->sc_fcrtl = FCRTL_DFLT; 2995 if (sc->sc_type < WM_T_82543) { 2996 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 2997 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 2998 } else { 2999 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 3000 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 3001 } 3002 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 3003 3004 #if 0 /* XXXJRT */ 3005 /* Deal with VLAN enables. */ 3006 if (VLAN_ATTACHED(&sc->sc_ethercom)) 3007 sc->sc_ctrl |= CTRL_VME; 3008 else 3009 #endif /* XXXJRT */ 3010 sc->sc_ctrl &= ~CTRL_VME; 3011 3012 /* Write the control registers. */ 3013 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3014 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) { 3015 int val; 3016 val = CSR_READ(sc, WMREG_CTRL_EXT); 3017 val &= ~CTRL_EXT_LINK_MODE_MASK; 3018 CSR_WRITE(sc, WMREG_CTRL_EXT, val); 3019 3020 /* Bypass RX and TX FIFO's */ 3021 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 3022 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | 3023 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 3024 3025 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 3026 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 3027 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 3028 /* 3029 * Set the mac to wait the maximum time between each 3030 * iteration and increase the max iterations when 3031 * polling the phy; this fixes erroneous timeouts at 10Mbps. 3032 */ 3033 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF); 3034 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM); 3035 val |= 0x3F; 3036 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val); 3037 } 3038 #if 0 3039 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 3040 #endif 3041 3042 /* 3043 * Set up checksum offload parameters. 3044 */ 3045 reg = CSR_READ(sc, WMREG_RXCSUM); 3046 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 3047 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 3048 reg |= RXCSUM_IPOFL; 3049 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 3050 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 3051 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 3052 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 3053 CSR_WRITE(sc, WMREG_RXCSUM, reg); 3054 3055 /* 3056 * Set up the interrupt registers. 3057 */ 3058 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3059 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 3060 ICR_RXO | ICR_RXT0; 3061 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 3062 sc->sc_icr |= ICR_RXCFG; 3063 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 3064 3065 /* Set up the inter-packet gap. */ 3066 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 3067 3068 if (sc->sc_type >= WM_T_82543) { 3069 /* Set up the interrupt throttling register (units of 256ns) */ 3070 sc->sc_itr = 1000000000 / (7000 * 256); 3071 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); 3072 } 3073 3074 #if 0 /* XXXJRT */ 3075 /* Set the VLAN ethernetype. 
 */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	if (sc->sc_type >= WM_T_80003)
		sc->sc_tctl |= TCTL_RTLC;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* 82573 doesn't support jumbo frame */
	if (sc->sc_type != WM_T_82573)
		sc->sc_rctl |= RCTL_LPE;

	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    sc->sc_dev.dv_xname);
	return (error);
}

/*
 * wm_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
3204 */ 3205 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3206 sc->sc_icr = 0; 3207 3208 /* Release any queued transmit buffers. */ 3209 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 3210 txs = &sc->sc_txsoft[i]; 3211 if (txs->txs_mbuf != NULL) { 3212 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3213 m_freem(txs->txs_mbuf); 3214 txs->txs_mbuf = NULL; 3215 } 3216 } 3217 3218 if (disable) 3219 wm_rxdrain(sc); 3220 3221 /* Mark the interface as down and cancel the watchdog timer. */ 3222 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3223 ifp->if_timer = 0; 3224 } 3225 3226 /* 3227 * wm_acquire_eeprom: 3228 * 3229 * Perform the EEPROM handshake required on some chips. 3230 */ 3231 static int 3232 wm_acquire_eeprom(struct wm_softc *sc) 3233 { 3234 uint32_t reg; 3235 int x; 3236 int ret = 0; 3237 3238 /* always success */ 3239 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3240 return 0; 3241 3242 if (sc->sc_flags & WM_F_SWFW_SYNC) { 3243 /* this will also do wm_get_swsm_semaphore() if needed */ 3244 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 3245 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 3246 ret = wm_get_swsm_semaphore(sc); 3247 } 3248 3249 if (ret) 3250 return 1; 3251 3252 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3253 reg = CSR_READ(sc, WMREG_EECD); 3254 3255 /* Request EEPROM access. */ 3256 reg |= EECD_EE_REQ; 3257 CSR_WRITE(sc, WMREG_EECD, reg); 3258 3259 /* ..and wait for it to be granted. */ 3260 for (x = 0; x < 1000; x++) { 3261 reg = CSR_READ(sc, WMREG_EECD); 3262 if (reg & EECD_EE_GNT) 3263 break; 3264 delay(5); 3265 } 3266 if ((reg & EECD_EE_GNT) == 0) { 3267 aprint_error("%s: could not acquire EEPROM GNT\n", 3268 sc->sc_dev.dv_xname); 3269 reg &= ~EECD_EE_REQ; 3270 CSR_WRITE(sc, WMREG_EECD, reg); 3271 if (sc->sc_flags & WM_F_SWFW_SYNC) 3272 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3273 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3274 wm_put_swsm_semaphore(sc); 3275 return (1); 3276 } 3277 } 3278 3279 return (0); 3280 } 3281 3282 /* 3283 * wm_release_eeprom: 3284 * 3285 * Release the EEPROM mutex. 3286 */ 3287 static void 3288 wm_release_eeprom(struct wm_softc *sc) 3289 { 3290 uint32_t reg; 3291 3292 /* always success */ 3293 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3294 return; 3295 3296 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3297 reg = CSR_READ(sc, WMREG_EECD); 3298 reg &= ~EECD_EE_REQ; 3299 CSR_WRITE(sc, WMREG_EECD, reg); 3300 } 3301 3302 if (sc->sc_flags & WM_F_SWFW_SYNC) 3303 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3304 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3305 wm_put_swsm_semaphore(sc); 3306 } 3307 3308 /* 3309 * wm_eeprom_sendbits: 3310 * 3311 * Send a series of bits to the EEPROM. 3312 */ 3313 static void 3314 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 3315 { 3316 uint32_t reg; 3317 int x; 3318 3319 reg = CSR_READ(sc, WMREG_EECD); 3320 3321 for (x = nbits; x > 0; x--) { 3322 if (bits & (1U << (x - 1))) 3323 reg |= EECD_DI; 3324 else 3325 reg &= ~EECD_DI; 3326 CSR_WRITE(sc, WMREG_EECD, reg); 3327 delay(2); 3328 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3329 delay(2); 3330 CSR_WRITE(sc, WMREG_EECD, reg); 3331 delay(2); 3332 } 3333 } 3334 3335 /* 3336 * wm_eeprom_recvbits: 3337 * 3338 * Receive a series of bits from the EEPROM. 
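 *
 *	This mirrors wm_eeprom_sendbits(): each bit cell is clocked by
 *	raising and dropping EECD_SK with a short settling delay, and
 *	the device's data-out line (EECD_DO) is sampled while SK is
 *	high, most-significant bit first.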
 */
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t reg, val;
	int x;

	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	val = 0;
	for (x = nbits; x > 0; x--) {
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			val |= (1U << (x - 1));
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
	*valp = val;
}

/*
 * wm_read_eeprom_uwire:
 *
 *	Read a word from the EEPROM using the MicroWire protocol.
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}

/*
 * wm_spi_eeprom_ready:
 *
 *	Wait for a SPI EEPROM to be ready for commands.
 */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error("%s: EEPROM failed to become ready\n",
		    sc->sc_dev.dv_xname);
		return (1);
	}
	return (0);
}

/*
 * wm_read_eeprom_spi:
 *
 *	Read a word from the EEPROM using the SPI protocol.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}

#define	EEPROM_CHECKSUM		0xBABA
#define	EEPROM_SIZE		0x0040

/*
 * wm_validate_eeprom_checksum
 *
 *	The checksum is defined as the sum of the first 64 (16 bit) words.
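 *
 *	The vendor chooses the checksum word so that the 16-bit sum of
 *	all 64 words is EEPROM_CHECKSUM (0xBABA); for example, if the
 *	other 63 words sum to 0xB000, the stored checksum word would
 *	be 0x0ABA.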
 */
static int
wm_validate_eeprom_checksum(struct wm_softc *sc)
{
	uint16_t checksum;
	uint16_t eeprom_data;
	int i;

	checksum = 0;

	for (i = 0; i < EEPROM_SIZE; i++) {
		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
			return 1;
		checksum += eeprom_data;
	}

	if (checksum != (uint16_t) EEPROM_CHECKSUM)
		return 1;

	return 0;
}

/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	if (wm_acquire_eeprom(sc))
		return 1;

	if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return rv;
}

static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return error;
}

static int
wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
{
	uint32_t attempts = 100000;
	uint32_t i, reg = 0;
	int32_t done = -1;

	for (i = 0; i < attempts; i++) {
		reg = CSR_READ(sc, rw);

		if (reg & EERD_DONE) {
			done = 0;
			break;
		}
		delay(5);
	}

	return done;
}

/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
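 *
 *	Each entry is a RAL/RAH register pair, packed little-endian:
 *	for example, 00:a0:c9:12:34:56 would be written as
 *	RAL = 0x12c9a000 and RAH = 0x00005634 with RAL_AV set.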
3617 */ 3618 static void 3619 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3620 { 3621 uint32_t ral_lo, ral_hi; 3622 3623 if (enaddr != NULL) { 3624 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 3625 (enaddr[3] << 24); 3626 ral_hi = enaddr[4] | (enaddr[5] << 8); 3627 ral_hi |= RAL_AV; 3628 } else { 3629 ral_lo = 0; 3630 ral_hi = 0; 3631 } 3632 3633 if (sc->sc_type >= WM_T_82544) { 3634 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 3635 ral_lo); 3636 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 3637 ral_hi); 3638 } else { 3639 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 3640 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 3641 } 3642 } 3643 3644 /* 3645 * wm_mchash: 3646 * 3647 * Compute the hash of the multicast address for the 4096-bit 3648 * multicast filter. 3649 */ 3650 static uint32_t 3651 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3652 { 3653 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3654 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3655 uint32_t hash; 3656 3657 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3658 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3659 3660 return (hash & 0xfff); 3661 } 3662 3663 /* 3664 * wm_set_filter: 3665 * 3666 * Set up the receive filter. 3667 */ 3668 static void 3669 wm_set_filter(struct wm_softc *sc) 3670 { 3671 struct ethercom *ec = &sc->sc_ethercom; 3672 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3673 struct ether_multi *enm; 3674 struct ether_multistep step; 3675 bus_addr_t mta_reg; 3676 uint32_t hash, reg, bit; 3677 int i; 3678 3679 if (sc->sc_type >= WM_T_82544) 3680 mta_reg = WMREG_CORDOVA_MTA; 3681 else 3682 mta_reg = WMREG_MTA; 3683 3684 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3685 3686 if (ifp->if_flags & IFF_BROADCAST) 3687 sc->sc_rctl |= RCTL_BAM; 3688 if (ifp->if_flags & IFF_PROMISC) { 3689 sc->sc_rctl |= RCTL_UPE; 3690 goto allmulti; 3691 } 3692 3693 /* 3694 * Set the station address in the first RAL slot, and 3695 * clear the remaining slots. 3696 */ 3697 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0); 3698 for (i = 1; i < WM_RAL_TABSIZE; i++) 3699 wm_set_ral(sc, NULL, i); 3700 3701 /* Clear out the multicast table. */ 3702 for (i = 0; i < WM_MC_TABSIZE; i++) 3703 CSR_WRITE(sc, mta_reg + (i << 2), 0); 3704 3705 ETHER_FIRST_MULTI(step, ec, enm); 3706 while (enm != NULL) { 3707 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3708 /* 3709 * We must listen to a range of multicast addresses. 3710 * For now, just accept all multicasts, rather than 3711 * trying to set only those filter bits needed to match 3712 * the range. (At this time, the only use of address 3713 * ranges is for IP multicast routing, for which the 3714 * range is big enough to require all bits set.) 3715 */ 3716 goto allmulti; 3717 } 3718 3719 hash = wm_mchash(sc, enm->enm_addrlo); 3720 3721 reg = (hash >> 5) & 0x7f; 3722 bit = hash & 0x1f; 3723 3724 hash = CSR_READ(sc, mta_reg + (reg << 2)); 3725 hash |= 1U << bit; 3726 3727 /* XXX Hardware bug?? 
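		 * On the 82544, an update to one of these MTA words
		 * apparently does not stick unless the adjacent word
		 * is rewritten afterwards, so the workaround below
		 * rewrites entry (reg - 1) after writing entry reg.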
*/ 3728 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { 3729 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 3730 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3731 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 3732 } else 3733 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 3734 3735 ETHER_NEXT_MULTI(step, enm); 3736 } 3737 3738 ifp->if_flags &= ~IFF_ALLMULTI; 3739 goto setit; 3740 3741 allmulti: 3742 ifp->if_flags |= IFF_ALLMULTI; 3743 sc->sc_rctl |= RCTL_MPE; 3744 3745 setit: 3746 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 3747 } 3748 3749 /* 3750 * wm_tbi_mediainit: 3751 * 3752 * Initialize media for use on 1000BASE-X devices. 3753 */ 3754 static void 3755 wm_tbi_mediainit(struct wm_softc *sc) 3756 { 3757 const char *sep = ""; 3758 3759 if (sc->sc_type < WM_T_82543) 3760 sc->sc_tipg = TIPG_WM_DFLT; 3761 else 3762 sc->sc_tipg = TIPG_LG_DFLT; 3763 3764 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange, 3765 wm_tbi_mediastatus); 3766 3767 /* 3768 * SWD Pins: 3769 * 3770 * 0 = Link LED (output) 3771 * 1 = Loss Of Signal (input) 3772 */ 3773 sc->sc_ctrl |= CTRL_SWDPIO(0); 3774 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 3775 3776 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3777 3778 #define ADD(ss, mm, dd) \ 3779 do { \ 3780 aprint_normal("%s%s", sep, ss); \ 3781 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \ 3782 sep = ", "; \ 3783 } while (/*CONSTCOND*/0) 3784 3785 aprint_normal("%s: ", sc->sc_dev.dv_xname); 3786 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 3787 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD); 3788 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD); 3789 aprint_normal("\n"); 3790 3791 #undef ADD 3792 3793 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 3794 } 3795 3796 /* 3797 * wm_tbi_mediastatus: [ifmedia interface function] 3798 * 3799 * Get the current interface media status on a 1000BASE-X device. 3800 */ 3801 static void 3802 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 3803 { 3804 struct wm_softc *sc = ifp->if_softc; 3805 uint32_t ctrl; 3806 3807 ifmr->ifm_status = IFM_AVALID; 3808 ifmr->ifm_active = IFM_ETHER; 3809 3810 if (sc->sc_tbi_linkup == 0) { 3811 ifmr->ifm_active |= IFM_NONE; 3812 return; 3813 } 3814 3815 ifmr->ifm_status |= IFM_ACTIVE; 3816 ifmr->ifm_active |= IFM_1000_SX; 3817 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 3818 ifmr->ifm_active |= IFM_FDX; 3819 ctrl = CSR_READ(sc, WMREG_CTRL); 3820 if (ctrl & CTRL_RFCE) 3821 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 3822 if (ctrl & CTRL_TFCE) 3823 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 3824 } 3825 3826 /* 3827 * wm_tbi_mediachange: [ifmedia interface function] 3828 * 3829 * Set hardware to newly-selected media on a 1000BASE-X device. 3830 */ 3831 static int 3832 wm_tbi_mediachange(struct ifnet *ifp) 3833 { 3834 struct wm_softc *sc = ifp->if_softc; 3835 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 3836 uint32_t status; 3837 int i; 3838 3839 sc->sc_txcw = ife->ifm_data; 3840 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO || 3841 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 3842 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM; 3843 sc->sc_txcw |= TXCW_ANE; 3844 3845 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 3846 delay(10000); 3847 3848 /* NOTE: CTRL will update TFCE and RFCE automatically. */ 3849 3850 sc->sc_tbi_anstate = 0; 3851 3852 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) { 3853 /* Have signal; wait for the link to come up. 
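		 * The loop below polls STATUS_LU every 10ms for up to
		 * 50 iterations, i.e. roughly half a second in total.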
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL,
			    sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}

/*
 * wm_tbi_set_linkled:
 *
 *	Update the link LED on 1000BASE-X devices.
 */
static void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}

/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		sc->sc_fcrtl &= ~FCRTL_XONE;
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		if (ctrl & CTRL_TFCE)
			sc->sc_fcrtl |= FCRTL_XONE;
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
		    WMREG_OLD_FCRTL : WMREG_FCRTL,
		    sc->sc_fcrtl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}

/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */

	if (sc->sc_type >= WM_T_80003) {
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
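		/*
		 * The 80003 shares its PHYs with the management
		 * firmware, so take the software/firmware semaphore
		 * for this function's PHY before touching it.
		 */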
		if (wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
			return;
	}
	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
	if (sc->sc_type >= WM_T_80003)
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type >= WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003;
	 * the em driver only sets CTRL_SLU here, but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_flowflags;
}

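/*
 * Note that wm_gmii_mediastatus() reports the driver's own record of
 * the negotiated flow-control state (sc_flowflags, maintained by
 * wm_gmii_statchg() below) in place of the flow-control bits the MII
 * layer left in mii_media_active.
 */
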
/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;

	if (ifp->if_flags & IFF_UP) {
		sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
		sc->sc_ctrl |= CTRL_SLU;
		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			sc->sc_ctrl |= CTRL_ASDE;
		} else {
			sc->sc_ctrl &= ~CTRL_ASDE;
			sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
			if (ife->ifm_media & IFM_FDX)
				sc->sc_ctrl |= CTRL_FD;
			switch (IFM_SUBTYPE(ife->ifm_media)) {
			case IFM_10_T:
				sc->sc_ctrl |= CTRL_SPEED_10;
				break;
			case IFM_100_TX:
				sc->sc_ctrl |= CTRL_SPEED_100;
				break;
			case IFM_1000_T:
				sc->sc_ctrl |= CTRL_SPEED_1000;
				break;
			default:
				panic("wm_gmii_mediachange: bad media 0x%x",
				    ife->ifm_media);
			}
		}
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		mii_mediachg(&sc->sc_mii);
	}
	return (0);
}

#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}

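/*
 * The i82543 accessors bit-bang IEEE 802.3 clause 22 management
 * frames over the chip's software-definable pins: a 32-bit all-ones
 * preamble, then start/opcode/PHY address/register address.  On a
 * read the 14 command bits are sent and the 16 data bits are clocked
 * back in; on a write (below) the turnaround and 16 data bits are
 * appended and all 32 bits are sent outbound.
 */
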
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to
 *	lock the resource ...
 */
static int
wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_gmii_i80003_writereg:	[mii interface function]
 *
 *	Write a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to
 *	lock the resource ...
 */
static void
wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get the flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type >= WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}

/*
 * wm_kmrn_i80003_readreg:
 *
 *	Read a kumeran register.
 */
static int
wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);

	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_kmrn_i80003_writereg:
 *
 *	Write a kumeran register.
 */
static void
wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, or 0 if it is
 *	Flash (possible on the 82573 only).
 */
static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 and 16. */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, the device is Flash type. */
		if (eecd == 0x03) {
			return 0;
		}
	}
	return 1;
}

/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software EEPROM/firmware semaphore bit
 *	(SWSM.SWESMBI).
 */
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error("%s: could not acquire SWSM semaphore\n",
		    sc->sc_dev.dv_xname);
		/* Release the semaphore. */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}

/*
 * wm_put_swsm_semaphore:
 *
 *	Release the SWSM semaphore.
 */
static void
wm_put_swsm_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}

/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the requested resources in SW_FW_SYNC.  The register
 *	holds two images of the resource mask: software claim bits at
 *	SWFW_SOFT_SHIFT and firmware claim bits at SWFW_FIRM_SHIFT.
 *	A resource is free only when neither image has its bit set,
 *	and SW_FW_SYNC itself is accessed under the SWSM semaphore on
 *	parts that have one.
 */
static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
			if (wm_get_swsm_semaphore(sc))
				return 1;
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return 0;
		}
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
			wm_put_swsm_semaphore(sc);
		delay(5000);
	}
	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    sc->sc_dev.dv_xname, mask, swfw_sync);
	return 1;
}

/*
 * wm_put_swfw_semaphore:
 *
 *	Release previously-acquired SW_FW_SYNC resources.
 */
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}