/*	$NetBSD: if_wm.c,v 1.264 2013/09/13 21:22:10 martin Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.264 2013/09/13 21:22:10 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
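
/*
 * The ring sizes and job-queue lengths above are all powers of two,
 * so WM_NEXTTX()/WM_NEXTTXS() can wrap an index with a simple AND of
 * the corresponding mask rather than a modulo.  WM_TXQUEUE_GC() is
 * used as a low-water mark: when fewer transmit jobs than this remain
 * free, completed jobs are reclaimed before more work is queued.
 */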

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
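
/*
 * WM_CDTXOFF()/WM_CDRXOFF() compute the byte offset of a single
 * descriptor within the control-data clump.  Adding that offset to
 * the clump's DMA address (see WM_CDTXADDR()/WM_CDRXADDR() below)
 * yields the bus address programmed into the chip's ring registers.
 */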

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
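
/*
 * Note that a sync range which crosses the end of the Tx ring is
 * issued as two bus_dmamap_sync() calls: one from the starting index
 * to the physical end of the ring, and one from entry 0 for whatever
 * remains.  Each call therefore covers a contiguous byte range of
 * the control-data clump.
 */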

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
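
/*
 * The final CSR_WRITE() above stores the ring index into the Receive
 * Descriptor Tail (RDT) register, handing the freshly initialized
 * descriptor and its buffer back to the hardware.
 */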

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};
"I350 Gigabit Network Connection", 1015 WM_T_I350, WMP_F_1000T }, 1016 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, 1017 "I350 Gigabit Fiber Network Connection", 1018 WM_T_I350, WMP_F_1000X }, 1019 #if 0 1020 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, 1021 "I350 Gigabit Backplane Connection", 1022 WM_T_I350, WMP_F_SERDES }, 1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, 1024 "I350 Gigabit Connection", 1025 WM_T_I350, WMP_F_1000T }, 1026 #endif 1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, 1028 "I210-T1 Ethernet Server Adapter", 1029 WM_T_I210, WMP_F_1000T }, 1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, 1031 "I210 Ethernet (Copper OEM)", 1032 WM_T_I210, WMP_F_1000T }, 1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, 1034 "I210 Ethernet (Copper IT)", 1035 WM_T_I210, WMP_F_1000T }, 1036 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, 1037 "I210 Gigabit Ethernet (Fiber)", 1038 WM_T_I210, WMP_F_1000X }, 1039 #if 0 1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, 1041 "I210 Gigabit Ethernet (SERDES)", 1042 WM_T_I210, WMP_F_SERDES }, 1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, 1044 "I210 Gigabit Ethernet (SGMII)", 1045 WM_T_I210, WMP_F_SERDES }, 1046 #endif 1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, 1048 "I211 Ethernet (COPPER)", 1049 WM_T_I211, WMP_F_1000T }, 1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, 1051 "I217 V Ethernet Connection", 1052 WM_T_PCH_LPT, WMP_F_1000T }, 1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, 1054 "I217 LM Ethernet Connection", 1055 WM_T_PCH_LPT, WMP_F_1000T }, 1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, 1057 "I218 V Ethernet Connection", 1058 WM_T_PCH_LPT, WMP_F_1000T }, 1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, 1060 "I218 LM Ethernet Connection", 1061 WM_T_PCH_LPT, WMP_F_1000T }, 1062 { 0, 0, 1063 NULL, 1064 0, 0 }, 1065 }; 1066 1067 #ifdef WM_EVENT_COUNTERS 1068 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; 1069 #endif /* WM_EVENT_COUNTERS */ 1070 1071 #if 0 /* Not currently used */ 1072 static inline uint32_t 1073 wm_io_read(struct wm_softc *sc, int reg) 1074 { 1075 1076 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1077 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); 1078 } 1079 #endif 1080 1081 static inline void 1082 wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 1083 { 1084 1085 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1086 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 1087 } 1088 1089 static inline void 1090 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off, 1091 uint32_t data) 1092 { 1093 uint32_t regval; 1094 int i; 1095 1096 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT); 1097 1098 CSR_WRITE(sc, reg, regval); 1099 1100 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) { 1101 delay(5); 1102 if (CSR_READ(sc, reg) & SCTL_CTL_READY) 1103 break; 1104 } 1105 if (i == SCTL_CTL_POLL_TIMEOUT) { 1106 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n", 1107 device_xname(sc->sc_dev), reg); 1108 } 1109 } 1110 1111 static inline void 1112 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1113 { 1114 wa->wa_low = htole32(v & 0xffffffffU); 1115 if (sizeof(bus_addr_t) == 8) 1116 wa->wa_high = htole32((uint64_t) v >> 32); 1117 else 1118 wa->wa_high = 0; 1119 } 1120 1121 static void 1122 wm_set_spiaddrbits(struct wm_softc *sc) 1123 { 1124 uint32_t reg; 1125 1126 sc->sc_flags |= WM_F_EEPROM_SPI; 1127 reg = CSR_READ(sc, WMREG_EECD); 1128 

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
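
/*
 * Descriptor base addresses are stored as a little-endian low/high
 * 32-bit pair.  The sizeof(bus_addr_t) test is a compile-time
 * constant, so on 32-bit configurations the high word is simply
 * zeroed with no run-time cost.
 */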

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}
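
	/*
	 * At this point sc_bus_speed and the WM_F_CSA/WM_F_PCIE/
	 * WM_F_PCIX/WM_F_BUS64 flags describe how the MAC is attached
	 * to the host; the 82547 CSA case above has also set up the
	 * Tx FIFO stall work-around callout.
	 */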

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}
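
	/*
	 * The 0x100000000ULL boundary argument to bus_dmamem_alloc()
	 * above is what enforces the NOTE: the allocation may not
	 * cross a 4GB boundary, so all descriptors in the clump share
	 * the same upper 32 address bits.
	 */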

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
	    || (sc->sc_type == WM_T_PCH_LPT))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	case WM_T_I210:
	case WM_T_I211:
		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	default:
		break;
	}
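
	/*
	 * To summarize the switch above: older pre-PCIe parts use a
	 * raw Microwire or SPI serial EEPROM, the 8257x/80003
	 * families add the EERD/EEWR register interface plus
	 * software/firmware semaphores, the ICH/PCH family keeps its
	 * NVM in a shared flash bank, and I210/I211 are flagged
	 * WM_F_EEPROM_FLASH_HW for their hardware flash interface.
	 */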
1652 */ 1653 if (wm_validate_eeprom_checksum(sc)) 1654 sc->sc_flags |= WM_F_EEPROM_INVALID; 1655 } 1656 1657 /* Set device properties (macflags) */ 1658 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); 1659 1660 if (sc->sc_flags & WM_F_EEPROM_INVALID) 1661 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n"); 1662 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) { 1663 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n"); 1664 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) { 1665 aprint_verbose_dev(sc->sc_dev, "FLASH\n"); 1666 } else { 1667 if (sc->sc_flags & WM_F_EEPROM_SPI) 1668 eetype = "SPI"; 1669 else 1670 eetype = "MicroWire"; 1671 aprint_verbose_dev(sc->sc_dev, 1672 "%u word (%d address bits) %s EEPROM\n", 1673 1U << sc->sc_ee_addrbits, 1674 sc->sc_ee_addrbits, eetype); 1675 } 1676 1677 switch (sc->sc_type) { 1678 case WM_T_82571: 1679 case WM_T_82572: 1680 case WM_T_82573: 1681 case WM_T_82574: 1682 case WM_T_82583: 1683 case WM_T_80003: 1684 case WM_T_ICH8: 1685 case WM_T_ICH9: 1686 case WM_T_ICH10: 1687 case WM_T_PCH: 1688 case WM_T_PCH2: 1689 case WM_T_PCH_LPT: 1690 if (wm_check_mng_mode(sc) != 0) 1691 wm_get_hw_control(sc); 1692 break; 1693 default: 1694 break; 1695 } 1696 wm_get_wakeup(sc); 1697 /* 1698 * Read the Ethernet address from the EEPROM, if not first found 1699 * in device properties. 1700 */ 1701 ea = prop_dictionary_get(dict, "mac-address"); 1702 if (ea != NULL) { 1703 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 1704 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 1705 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 1706 } else { 1707 if (wm_read_mac_addr(sc, enaddr) != 0) { 1708 aprint_error_dev(sc->sc_dev, 1709 "unable to read Ethernet address\n"); 1710 return; 1711 } 1712 } 1713 1714 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 1715 ether_sprintf(enaddr)); 1716 1717 /* 1718 * Read the config info from the EEPROM, and set up various 1719 * bits in the control registers based on their contents. 1720 */ 1721 pn = prop_dictionary_get(dict, "i82543-cfg1"); 1722 if (pn != NULL) { 1723 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1724 cfg1 = (uint16_t) prop_number_integer_value(pn); 1725 } else { 1726 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) { 1727 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 1728 return; 1729 } 1730 } 1731 1732 pn = prop_dictionary_get(dict, "i82543-cfg2"); 1733 if (pn != NULL) { 1734 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1735 cfg2 = (uint16_t) prop_number_integer_value(pn); 1736 } else { 1737 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) { 1738 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 1739 return; 1740 } 1741 } 1742 1743 /* check for WM_F_WOL */ 1744 switch (sc->sc_type) { 1745 case WM_T_82542_2_0: 1746 case WM_T_82542_2_1: 1747 case WM_T_82543: 1748 /* dummy? */ 1749 eeprom_data = 0; 1750 apme_mask = EEPROM_CFG3_APME; 1751 break; 1752 case WM_T_82544: 1753 apme_mask = EEPROM_CFG2_82544_APM_EN; 1754 eeprom_data = cfg2; 1755 break; 1756 case WM_T_82546: 1757 case WM_T_82546_3: 1758 case WM_T_82571: 1759 case WM_T_82572: 1760 case WM_T_82573: 1761 case WM_T_82574: 1762 case WM_T_82583: 1763 case WM_T_80003: 1764 default: 1765 apme_mask = EEPROM_CFG3_APME; 1766 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? 
EEPROM_OFF_CFG3_PORTB
1767 : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1768 break;
1769 case WM_T_82575:
1770 case WM_T_82576:
1771 case WM_T_82580:
1772 case WM_T_82580ER:
1773 case WM_T_I350:
1774 case WM_T_ICH8:
1775 case WM_T_ICH9:
1776 case WM_T_ICH10:
1777 case WM_T_PCH:
1778 case WM_T_PCH2:
1779 case WM_T_PCH_LPT:
1780 /* XXX The funcid should be checked on some devices */
1781 apme_mask = WUC_APME;
1782 eeprom_data = CSR_READ(sc, WMREG_WUC);
1783 break;
1784 }
1785
1786 /* Set the WM_F_WOL flag if the APME bit read above is on */
1787 if ((eeprom_data & apme_mask) != 0)
1788 sc->sc_flags |= WM_F_WOL;
1789 #ifdef WM_DEBUG
1790 if ((sc->sc_flags & WM_F_WOL) != 0)
1791 printf("WOL\n");
1792 #endif
1793
1794 /*
1795 * XXX need special handling for some multiple port cards
1796 * to disable a particular port.
1797 */
1798
1799 if (sc->sc_type >= WM_T_82544) {
1800 pn = prop_dictionary_get(dict, "i82543-swdpin");
1801 if (pn != NULL) {
1802 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1803 swdpin = (uint16_t) prop_number_integer_value(pn);
1804 } else {
1805 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1806 aprint_error_dev(sc->sc_dev,
1807 "unable to read SWDPIN\n");
1808 return;
1809 }
1810 }
1811 }
1812
1813 if (cfg1 & EEPROM_CFG1_ILOS)
1814 sc->sc_ctrl |= CTRL_ILOS;
1815 if (sc->sc_type >= WM_T_82544) {
1816 sc->sc_ctrl |=
1817 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1818 CTRL_SWDPIO_SHIFT;
1819 sc->sc_ctrl |=
1820 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1821 CTRL_SWDPINS_SHIFT;
1822 } else {
1823 sc->sc_ctrl |=
1824 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1825 CTRL_SWDPIO_SHIFT;
1826 }
1827
1828 #if 0
1829 if (sc->sc_type >= WM_T_82544) {
1830 if (cfg1 & EEPROM_CFG1_IPS0)
1831 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1832 if (cfg1 & EEPROM_CFG1_IPS1)
1833 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1834 sc->sc_ctrl_ext |=
1835 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1836 CTRL_EXT_SWDPIO_SHIFT;
1837 sc->sc_ctrl_ext |=
1838 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1839 CTRL_EXT_SWDPINS_SHIFT;
1840 } else {
1841 sc->sc_ctrl_ext |=
1842 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1843 CTRL_EXT_SWDPIO_SHIFT;
1844 }
1845 #endif
1846
1847 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1848 #if 0
1849 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1850 #endif
1851
1852 /*
1853 * Set up some register offsets that are different between
1854 * the i82542 and the i82543 and later chips.
1855 */
1856 if (sc->sc_type < WM_T_82543) {
1857 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1858 sc->sc_tdt_reg = WMREG_OLD_TDT;
1859 } else {
1860 sc->sc_rdt_reg = WMREG_RDT;
1861 sc->sc_tdt_reg = WMREG_TDT;
1862 }
1863
1864 if (sc->sc_type == WM_T_PCH) {
1865 uint16_t val;
1866
1867 /* Save the NVM K1 bit setting */
1868 wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1869
1870 if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1871 sc->sc_nvm_k1_enabled = 1;
1872 else
1873 sc->sc_nvm_k1_enabled = 0;
1874 }
1875
1876 /*
1877 * Determine whether we're in TBI, GMII, or SGMII mode, and
1878 * initialize the media structures accordingly.
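 *
 * (Roughly: the ICH/PCH parts and the 8257[34]/82583 always use
 * the internal GMII PHY; pre-82543 parts, or anything with
 * STATUS_TBIMODE set, use TBI; on the 82575 and newer, the
 * CTRL_EXT link-mode field selects GMII, SGMII or 1000KX/SERDES,
 * as the switch below works out.)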
1879 */ 1880 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 1881 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH 1882 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT 1883 || sc->sc_type == WM_T_82573 1884 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { 1885 /* STATUS_TBIMODE reserved/reused, can't rely on it */ 1886 wm_gmii_mediainit(sc, wmp->wmp_product); 1887 } else if (sc->sc_type < WM_T_82543 || 1888 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 1889 if (wmp->wmp_flags & WMP_F_1000T) 1890 aprint_error_dev(sc->sc_dev, 1891 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 1892 wm_tbi_mediainit(sc); 1893 } else { 1894 switch (sc->sc_type) { 1895 case WM_T_82575: 1896 case WM_T_82576: 1897 case WM_T_82580: 1898 case WM_T_82580ER: 1899 case WM_T_I350: 1900 case WM_T_I210: 1901 case WM_T_I211: 1902 reg = CSR_READ(sc, WMREG_CTRL_EXT); 1903 switch (reg & CTRL_EXT_LINK_MODE_MASK) { 1904 case CTRL_EXT_LINK_MODE_SGMII: 1905 aprint_verbose_dev(sc->sc_dev, "SGMII\n"); 1906 sc->sc_flags |= WM_F_SGMII; 1907 CSR_WRITE(sc, WMREG_CTRL_EXT, 1908 reg | CTRL_EXT_I2C_ENA); 1909 wm_gmii_mediainit(sc, wmp->wmp_product); 1910 break; 1911 case CTRL_EXT_LINK_MODE_1000KX: 1912 case CTRL_EXT_LINK_MODE_PCIE_SERDES: 1913 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n"); 1914 CSR_WRITE(sc, WMREG_CTRL_EXT, 1915 reg | CTRL_EXT_I2C_ENA); 1916 panic("not supported yet\n"); 1917 break; 1918 case CTRL_EXT_LINK_MODE_GMII: 1919 default: 1920 CSR_WRITE(sc, WMREG_CTRL_EXT, 1921 reg & ~CTRL_EXT_I2C_ENA); 1922 wm_gmii_mediainit(sc, wmp->wmp_product); 1923 break; 1924 } 1925 break; 1926 default: 1927 if (wmp->wmp_flags & WMP_F_1000X) 1928 aprint_error_dev(sc->sc_dev, 1929 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 1930 wm_gmii_mediainit(sc, wmp->wmp_product); 1931 } 1932 } 1933 1934 ifp = &sc->sc_ethercom.ec_if; 1935 xname = device_xname(sc->sc_dev); 1936 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 1937 ifp->if_softc = sc; 1938 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1939 ifp->if_ioctl = wm_ioctl; 1940 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 1941 ifp->if_start = wm_nq_start; 1942 else 1943 ifp->if_start = wm_start; 1944 ifp->if_watchdog = wm_watchdog; 1945 ifp->if_init = wm_init; 1946 ifp->if_stop = wm_stop; 1947 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 1948 IFQ_SET_READY(&ifp->if_snd); 1949 1950 /* Check for jumbo frame */ 1951 switch (sc->sc_type) { 1952 case WM_T_82573: 1953 /* XXX limited to 9234 if ASPM is disabled */ 1954 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3); 1955 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0) 1956 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1957 break; 1958 case WM_T_82571: 1959 case WM_T_82572: 1960 case WM_T_82574: 1961 case WM_T_82575: 1962 case WM_T_82576: 1963 case WM_T_82580: 1964 case WM_T_82580ER: 1965 case WM_T_I350: 1966 case WM_T_I210: 1967 case WM_T_I211: 1968 case WM_T_80003: 1969 case WM_T_ICH9: 1970 case WM_T_ICH10: 1971 case WM_T_PCH2: /* PCH2 supports 9K frame size */ 1972 case WM_T_PCH_LPT: 1973 /* XXX limited to 9234 */ 1974 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1975 break; 1976 case WM_T_PCH: 1977 /* XXX limited to 4096 */ 1978 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1979 break; 1980 case WM_T_82542_2_0: 1981 case WM_T_82542_2_1: 1982 case WM_T_82583: 1983 case WM_T_ICH8: 1984 /* No support for jumbo frame */ 1985 break; 1986 default: 1987 /* ETHER_MAX_LEN_JUMBO */ 1988 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1989 break; 
1990 }
1991
1992 /*
1993 * If we're an i82543 or greater, we can support VLANs.
1994 */
1995 if (sc->sc_type >= WM_T_82543)
1996 sc->sc_ethercom.ec_capabilities |=
1997 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1998
1999 /*
2000 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
2001 * on the i82543 and later.
2002 */
2003 if (sc->sc_type >= WM_T_82543) {
2004 ifp->if_capabilities |=
2005 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2006 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2007 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2008 IFCAP_CSUM_TCPv6_Tx |
2009 IFCAP_CSUM_UDPv6_Tx;
2010 }
2011
2012 /*
2013 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2014 *
2015 * 82541GI (8086:1076) ... no
2016 * 82572EI (8086:10b9) ... yes
2017 */
2018 if (sc->sc_type >= WM_T_82571) {
2019 ifp->if_capabilities |=
2020 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2021 }
2022
2023 /*
2024 * If we're an i82544 or greater (except the i82547), we can do
2025 * TCP segmentation offload.
2026 */
2027 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2028 ifp->if_capabilities |= IFCAP_TSOv4;
2029 }
2030
2031 if (sc->sc_type >= WM_T_82571) {
2032 ifp->if_capabilities |= IFCAP_TSOv6;
2033 }
2034
2035 /*
2036 * Attach the interface.
2037 */
2038 if_attach(ifp);
2039 ether_ifattach(ifp, enaddr);
2040 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2041 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2042
2043 #ifdef WM_EVENT_COUNTERS
2044 /* Attach event counters. */
2045 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2046 NULL, xname, "txsstall");
2047 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2048 NULL, xname, "txdstall");
2049 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2050 NULL, xname, "txfifo_stall");
2051 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2052 NULL, xname, "txdw");
2053 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2054 NULL, xname, "txqe");
2055 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2056 NULL, xname, "rxintr");
2057 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2058 NULL, xname, "linkintr");
2059
2060 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2061 NULL, xname, "rxipsum");
2062 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2063 NULL, xname, "rxtusum");
2064 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2065 NULL, xname, "txipsum");
2066 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2067 NULL, xname, "txtusum");
2068 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2069 NULL, xname, "txtusum6");
2070
2071 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2072 NULL, xname, "txtso");
2073 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2074 NULL, xname, "txtso6");
2075 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2076 NULL, xname, "txtsopain");
2077
2078 for (i = 0; i < WM_NTXSEGS; i++) {
2079 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2080 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2081 NULL, xname, wm_txseg_evcnt_names[i]);
2082 }
2083
2084 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2085 NULL, xname, "txdrop");
2086
2087 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2088 NULL, xname, "tu");
2089
2090 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2091 NULL, xname, "tx_xoff");
2092 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2093 NULL, xname, "tx_xon");
2094 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff,
EVCNT_TYPE_MISC, 2095 NULL, xname, "rx_xoff"); 2096 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 2097 NULL, xname, "rx_xon"); 2098 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 2099 NULL, xname, "rx_macctl"); 2100 #endif /* WM_EVENT_COUNTERS */ 2101 2102 if (pmf_device_register(self, wm_suspend, wm_resume)) 2103 pmf_class_network_register(self, ifp); 2104 else 2105 aprint_error_dev(self, "couldn't establish power handler\n"); 2106 2107 return; 2108 2109 /* 2110 * Free any resources we've allocated during the failed attach 2111 * attempt. Do this in reverse order and fall through. 2112 */ 2113 fail_5: 2114 for (i = 0; i < WM_NRXDESC; i++) { 2115 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 2116 bus_dmamap_destroy(sc->sc_dmat, 2117 sc->sc_rxsoft[i].rxs_dmamap); 2118 } 2119 fail_4: 2120 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 2121 if (sc->sc_txsoft[i].txs_dmamap != NULL) 2122 bus_dmamap_destroy(sc->sc_dmat, 2123 sc->sc_txsoft[i].txs_dmamap); 2124 } 2125 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 2126 fail_3: 2127 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 2128 fail_2: 2129 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 2130 sc->sc_cd_size); 2131 fail_1: 2132 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg); 2133 fail_0: 2134 return; 2135 } 2136 2137 static int 2138 wm_detach(device_t self, int flags __unused) 2139 { 2140 struct wm_softc *sc = device_private(self); 2141 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2142 int i, s; 2143 2144 s = splnet(); 2145 /* Stop the interface. Callouts are stopped in it. */ 2146 wm_stop(ifp, 1); 2147 splx(s); 2148 2149 pmf_device_deregister(self); 2150 2151 /* Tell the firmware about the release */ 2152 wm_release_manageability(sc); 2153 wm_release_hw_control(sc); 2154 2155 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2156 2157 /* Delete all remaining media. */ 2158 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2159 2160 ether_ifdetach(ifp); 2161 if_detach(ifp); 2162 2163 2164 /* Unload RX dmamaps and free mbufs */ 2165 wm_rxdrain(sc); 2166 2167 /* Free dmamap. It's the same as the end of the wm_attach() function */ 2168 for (i = 0; i < WM_NRXDESC; i++) { 2169 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 2170 bus_dmamap_destroy(sc->sc_dmat, 2171 sc->sc_rxsoft[i].rxs_dmamap); 2172 } 2173 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 2174 if (sc->sc_txsoft[i].txs_dmamap != NULL) 2175 bus_dmamap_destroy(sc->sc_dmat, 2176 sc->sc_txsoft[i].txs_dmamap); 2177 } 2178 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 2179 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 2180 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 2181 sc->sc_cd_size); 2182 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg); 2183 2184 /* Disestablish the interrupt handler */ 2185 if (sc->sc_ih != NULL) { 2186 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 2187 sc->sc_ih = NULL; 2188 } 2189 2190 /* Unmap the registers */ 2191 if (sc->sc_ss) { 2192 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); 2193 sc->sc_ss = 0; 2194 } 2195 2196 if (sc->sc_ios) { 2197 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 2198 sc->sc_ios = 0; 2199 } 2200 2201 return 0; 2202 } 2203 2204 /* 2205 * wm_tx_offload: 2206 * 2207 * Set up TCP/IP checksumming parameters for the 2208 * specified packet. 
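 *
 * (Worked example, assuming a plain untagged IPv4/TCP frame with a
 * 20-byte IP header: offset = 14 and iphl = 20, so the context
 * descriptor is loaded with IPCSS = 14, IPCSO = 14 + 10 = 24,
 * IPCSE = 33, TUCSS = 34 and TUCSO = 34 + 16 = 50 -- the byte
 * offsets of ip_sum and th_sum within the frame.)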
2209 */ 2210 static int 2211 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, 2212 uint8_t *fieldsp) 2213 { 2214 struct mbuf *m0 = txs->txs_mbuf; 2215 struct livengood_tcpip_ctxdesc *t; 2216 uint32_t ipcs, tucs, cmd, cmdlen, seg; 2217 uint32_t ipcse; 2218 struct ether_header *eh; 2219 int offset, iphl; 2220 uint8_t fields; 2221 2222 /* 2223 * XXX It would be nice if the mbuf pkthdr had offset 2224 * fields for the protocol headers. 2225 */ 2226 2227 eh = mtod(m0, struct ether_header *); 2228 switch (htons(eh->ether_type)) { 2229 case ETHERTYPE_IP: 2230 case ETHERTYPE_IPV6: 2231 offset = ETHER_HDR_LEN; 2232 break; 2233 2234 case ETHERTYPE_VLAN: 2235 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2236 break; 2237 2238 default: 2239 /* 2240 * Don't support this protocol or encapsulation. 2241 */ 2242 *fieldsp = 0; 2243 *cmdp = 0; 2244 return 0; 2245 } 2246 2247 if ((m0->m_pkthdr.csum_flags & 2248 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { 2249 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 2250 } else { 2251 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 2252 } 2253 ipcse = offset + iphl - 1; 2254 2255 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 2256 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 2257 seg = 0; 2258 fields = 0; 2259 2260 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 2261 int hlen = offset + iphl; 2262 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 2263 2264 if (__predict_false(m0->m_len < 2265 (hlen + sizeof(struct tcphdr)))) { 2266 /* 2267 * TCP/IP headers are not in the first mbuf; we need 2268 * to do this the slow and painful way. Let's just 2269 * hope this doesn't happen very often. 2270 */ 2271 struct tcphdr th; 2272 2273 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 2274 2275 m_copydata(m0, hlen, sizeof(th), &th); 2276 if (v4) { 2277 struct ip ip; 2278 2279 m_copydata(m0, offset, sizeof(ip), &ip); 2280 ip.ip_len = 0; 2281 m_copyback(m0, 2282 offset + offsetof(struct ip, ip_len), 2283 sizeof(ip.ip_len), &ip.ip_len); 2284 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 2285 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 2286 } else { 2287 struct ip6_hdr ip6; 2288 2289 m_copydata(m0, offset, sizeof(ip6), &ip6); 2290 ip6.ip6_plen = 0; 2291 m_copyback(m0, 2292 offset + offsetof(struct ip6_hdr, ip6_plen), 2293 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 2294 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 2295 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 2296 } 2297 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 2298 sizeof(th.th_sum), &th.th_sum); 2299 2300 hlen += th.th_off << 2; 2301 } else { 2302 /* 2303 * TCP/IP headers are in the first mbuf; we can do 2304 * this the easy way. 
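 *
 * (Either way the goal is the same: for TSO the chip replays the
 * headers for every segment, so th_sum must be seeded with just
 * the pseudo-header sum over the addresses and protocol, with the
 * length field zeroed -- e.g. for IPv4:
 *
 *	th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
 *	    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
 *
 * The hardware then folds each segment's length in itself.)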
2305 */ 2306 struct tcphdr *th; 2307 2308 if (v4) { 2309 struct ip *ip = 2310 (void *)(mtod(m0, char *) + offset); 2311 th = (void *)(mtod(m0, char *) + hlen); 2312 2313 ip->ip_len = 0; 2314 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 2315 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2316 } else { 2317 struct ip6_hdr *ip6 = 2318 (void *)(mtod(m0, char *) + offset); 2319 th = (void *)(mtod(m0, char *) + hlen); 2320 2321 ip6->ip6_plen = 0; 2322 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 2323 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 2324 } 2325 hlen += th->th_off << 2; 2326 } 2327 2328 if (v4) { 2329 WM_EVCNT_INCR(&sc->sc_ev_txtso); 2330 cmdlen |= WTX_TCPIP_CMD_IP; 2331 } else { 2332 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 2333 ipcse = 0; 2334 } 2335 cmd |= WTX_TCPIP_CMD_TSE; 2336 cmdlen |= WTX_TCPIP_CMD_TSE | 2337 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 2338 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 2339 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 2340 } 2341 2342 /* 2343 * NOTE: Even if we're not using the IP or TCP/UDP checksum 2344 * offload feature, if we load the context descriptor, we 2345 * MUST provide valid values for IPCSS and TUCSS fields. 2346 */ 2347 2348 ipcs = WTX_TCPIP_IPCSS(offset) | 2349 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 2350 WTX_TCPIP_IPCSE(ipcse); 2351 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 2352 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 2353 fields |= WTX_IXSM; 2354 } 2355 2356 offset += iphl; 2357 2358 if (m0->m_pkthdr.csum_flags & 2359 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 2360 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 2361 fields |= WTX_TXSM; 2362 tucs = WTX_TCPIP_TUCSS(offset) | 2363 WTX_TCPIP_TUCSO(offset + 2364 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 2365 WTX_TCPIP_TUCSE(0) /* rest of packet */; 2366 } else if ((m0->m_pkthdr.csum_flags & 2367 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 2368 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 2369 fields |= WTX_TXSM; 2370 tucs = WTX_TCPIP_TUCSS(offset) | 2371 WTX_TCPIP_TUCSO(offset + 2372 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 2373 WTX_TCPIP_TUCSE(0) /* rest of packet */; 2374 } else { 2375 /* Just initialize it to a valid TCP context. */ 2376 tucs = WTX_TCPIP_TUCSS(offset) | 2377 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 2378 WTX_TCPIP_TUCSE(0) /* rest of packet */; 2379 } 2380 2381 /* Fill in the context descriptor. */ 2382 t = (struct livengood_tcpip_ctxdesc *) 2383 &sc->sc_txdescs[sc->sc_txnext]; 2384 t->tcpip_ipcs = htole32(ipcs); 2385 t->tcpip_tucs = htole32(tucs); 2386 t->tcpip_cmdlen = htole32(cmdlen); 2387 t->tcpip_seg = htole32(seg); 2388 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 2389 2390 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 2391 txs->txs_ndesc++; 2392 2393 *cmdp = cmd; 2394 *fieldsp = fields; 2395 2396 return 0; 2397 } 2398 2399 static void 2400 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 2401 { 2402 struct mbuf *m; 2403 int i; 2404 2405 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 2406 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 2407 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 2408 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 2409 m->m_data, m->m_len, m->m_flags); 2410 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 2411 i, i == 1 ? "" : "s"); 2412 } 2413 2414 /* 2415 * wm_82547_txfifo_stall: 2416 * 2417 * Callout used to wait for the 82547 Tx FIFO to drain, 2418 * reset the FIFO pointers, and restart packet transmission. 
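 *
 * (The FIFO is treated as drained once TDT == TDH and the
 * TDFT/TDFH and TDFTS/TDFHS pairs match; until then the callout
 * simply re-arms itself one tick at a time.)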
2419 */
2420 static void
2421 wm_82547_txfifo_stall(void *arg)
2422 {
2423 struct wm_softc *sc = arg;
2424 int s;
2425
2426 s = splnet();
2427
2428 if (sc->sc_txfifo_stall) {
2429 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2430 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2431 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2432 /*
2433 * Packets have drained. Stop transmitter, reset
2434 * FIFO pointers, restart transmitter, and kick
2435 * the packet queue.
2436 */
2437 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2438 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2439 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2440 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2441 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2442 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2443 CSR_WRITE(sc, WMREG_TCTL, tctl);
2444 CSR_WRITE_FLUSH(sc);
2445
2446 sc->sc_txfifo_head = 0;
2447 sc->sc_txfifo_stall = 0;
2448 wm_start(&sc->sc_ethercom.ec_if);
2449 } else {
2450 /*
2451 * Still waiting for packets to drain; try again in
2452 * another tick.
2453 */
2454 callout_schedule(&sc->sc_txfifo_ch, 1);
2455 }
2456 }
2457
2458 splx(s);
2459 }
2460
2461 static void
2462 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2463 {
2464 uint32_t reg;
2465
2466 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2467
2468 if (on != 0)
2469 reg |= EXTCNFCTR_GATE_PHY_CFG;
2470 else
2471 reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2472
2473 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2474 }
2475
2476 /*
2477 * wm_82547_txfifo_bugchk:
2478 *
2479 * Check for bug condition in the 82547 Tx FIFO. We need to
2480 * prevent enqueueing a packet that would wrap around the end
2481 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2482 *
2483 * We do this by checking the amount of space before the end
2484 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2485 * the Tx FIFO, wait for all remaining packets to drain, reset
2486 * the internal FIFO pointers to the beginning, and restart
2487 * transmission on the interface.
2488 */
2489 #define WM_FIFO_HDR 0x10
2490 #define WM_82547_PAD_LEN 0x3e0
2491 static int
2492 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2493 {
2494 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2495 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2496
2497 /* Just return if already stalled. */
2498 if (sc->sc_txfifo_stall)
2499 return 1;
2500
2501 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2502 /* Stall only occurs in half-duplex mode. */
2503 goto send_packet;
2504 }
2505
2506 if (len >= WM_82547_PAD_LEN + space) {
2507 sc->sc_txfifo_stall = 1;
2508 callout_schedule(&sc->sc_txfifo_ch, 1);
2509 return 1;
2510 }
2511
2512 send_packet:
2513 sc->sc_txfifo_head += len;
2514 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2515 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2516
2517 return 0;
2518 }
2519
2520 /*
2521 * wm_start: [ifnet interface function]
2522 *
2523 * Start packet transmission on the interface.
2524 */
2525 static void
2526 wm_start(struct ifnet *ifp)
2527 {
2528 struct wm_softc *sc = ifp->if_softc;
2529 struct mbuf *m0;
2530 struct m_tag *mtag;
2531 struct wm_txsoft *txs;
2532 bus_dmamap_t dmamap;
2533 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2534 bus_addr_t curaddr;
2535 bus_size_t seglen, curlen;
2536 uint32_t cksumcmd;
2537 uint8_t cksumfields;
2538
2539 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2540 return;
2541
2542 /*
2543 * Remember the previous number of free descriptors.
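 * If the loop below consumes any of them, we know we queued at
 * least one packet and arm the watchdog timer at the end.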
2544 */
2545 ofree = sc->sc_txfree;
2546
2547 /*
2548 * Loop through the send queue, setting up transmit descriptors
2549 * until we drain the queue, or use up all available transmit
2550 * descriptors.
2551 */
2552 for (;;) {
2553 /* Grab a packet off the queue. */
2554 IFQ_POLL(&ifp->if_snd, m0);
2555 if (m0 == NULL)
2556 break;
2557
2558 DPRINTF(WM_DEBUG_TX,
2559 ("%s: TX: have packet to transmit: %p\n",
2560 device_xname(sc->sc_dev), m0));
2561
2562 /* Get a work queue entry. */
2563 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2564 wm_txintr(sc);
2565 if (sc->sc_txsfree == 0) {
2566 DPRINTF(WM_DEBUG_TX,
2567 ("%s: TX: no free job descriptors\n",
2568 device_xname(sc->sc_dev)));
2569 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2570 break;
2571 }
2572 }
2573
2574 txs = &sc->sc_txsoft[sc->sc_txsnext];
2575 dmamap = txs->txs_dmamap;
2576
2577 use_tso = (m0->m_pkthdr.csum_flags &
2578 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2579
2580 /*
2581 * So says the Linux driver:
2582 * The controller does a simple calculation to make sure
2583 * there is enough room in the FIFO before initiating the
2584 * DMA for each buffer. The calc is:
2585 * 4 = ceil(buffer len / MSS)
2586 * To make sure we don't overrun the FIFO, adjust the max
2587 * buffer len if the MSS drops.
2588 */
2589 dmamap->dm_maxsegsz =
2590 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2591 ? m0->m_pkthdr.segsz << 2
2592 : WTX_MAX_LEN;
2593
2594 /*
2595 * Load the DMA map. If this fails, the packet either
2596 * didn't fit in the allotted number of segments, or we
2597 * were short on resources. For the too-many-segments
2598 * case, we simply report an error and drop the packet,
2599 * since we can't sanely copy a jumbo packet to a single
2600 * buffer.
2601 */
2602 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2603 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2604 if (error) {
2605 if (error == EFBIG) {
2606 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2607 log(LOG_ERR, "%s: Tx packet consumes too many "
2608 "DMA segments, dropping...\n",
2609 device_xname(sc->sc_dev));
2610 IFQ_DEQUEUE(&ifp->if_snd, m0);
2611 wm_dump_mbuf_chain(sc, m0);
2612 m_freem(m0);
2613 continue;
2614 }
2615 /*
2616 * Short on resources, just stop for now.
2617 */
2618 DPRINTF(WM_DEBUG_TX,
2619 ("%s: TX: dmamap load failed: %d\n",
2620 device_xname(sc->sc_dev), error));
2621 break;
2622 }
2623
2624 segs_needed = dmamap->dm_nsegs;
2625 if (use_tso) {
2626 /* For sentinel descriptor; see below. */
2627 segs_needed++;
2628 }
2629
2630 /*
2631 * Ensure we have enough descriptors free to describe
2632 * the packet. Note, we always reserve one descriptor
2633 * at the end of the ring due to the semantics of the
2634 * TDT register, plus one more in the event we need
2635 * to load offload context.
2636 */
2637 if (segs_needed > sc->sc_txfree - 2) {
2638 /*
2639 * Not enough free descriptors to transmit this
2640 * packet. We haven't committed anything yet,
2641 * so just unload the DMA map, put the packet
2642 * back on the queue, and punt. Notify the upper
2643 * layer that there are no more slots left.
2644 */
2645 DPRINTF(WM_DEBUG_TX,
2646 ("%s: TX: need %d (%d) descriptors, have %d\n",
2647 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2648 segs_needed, sc->sc_txfree - 1));
2649 ifp->if_flags |= IFF_OACTIVE;
2650 bus_dmamap_unload(sc->sc_dmat, dmamap);
2651 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2652 break;
2653 }
2654
2655 /*
2656 * Check for 82547 Tx FIFO bug.
We need to do this 2657 * once we know we can transmit the packet, since we 2658 * do some internal FIFO space accounting here. 2659 */ 2660 if (sc->sc_type == WM_T_82547 && 2661 wm_82547_txfifo_bugchk(sc, m0)) { 2662 DPRINTF(WM_DEBUG_TX, 2663 ("%s: TX: 82547 Tx FIFO bug detected\n", 2664 device_xname(sc->sc_dev))); 2665 ifp->if_flags |= IFF_OACTIVE; 2666 bus_dmamap_unload(sc->sc_dmat, dmamap); 2667 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall); 2668 break; 2669 } 2670 2671 IFQ_DEQUEUE(&ifp->if_snd, m0); 2672 2673 /* 2674 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 2675 */ 2676 2677 DPRINTF(WM_DEBUG_TX, 2678 ("%s: TX: packet has %d (%d) DMA segments\n", 2679 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 2680 2681 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 2682 2683 /* 2684 * Store a pointer to the packet so that we can free it 2685 * later. 2686 * 2687 * Initially, we consider the number of descriptors the 2688 * packet uses the number of DMA segments. This may be 2689 * incremented by 1 if we do checksum offload (a descriptor 2690 * is used to set the checksum context). 2691 */ 2692 txs->txs_mbuf = m0; 2693 txs->txs_firstdesc = sc->sc_txnext; 2694 txs->txs_ndesc = segs_needed; 2695 2696 /* Set up offload parameters for this packet. */ 2697 if (m0->m_pkthdr.csum_flags & 2698 (M_CSUM_TSOv4|M_CSUM_TSOv6| 2699 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 2700 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 2701 if (wm_tx_offload(sc, txs, &cksumcmd, 2702 &cksumfields) != 0) { 2703 /* Error message already displayed. */ 2704 bus_dmamap_unload(sc->sc_dmat, dmamap); 2705 continue; 2706 } 2707 } else { 2708 cksumcmd = 0; 2709 cksumfields = 0; 2710 } 2711 2712 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 2713 2714 /* Sync the DMA map. */ 2715 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 2716 BUS_DMASYNC_PREWRITE); 2717 2718 /* 2719 * Initialize the transmit descriptor. 2720 */ 2721 for (nexttx = sc->sc_txnext, seg = 0; 2722 seg < dmamap->dm_nsegs; seg++) { 2723 for (seglen = dmamap->dm_segs[seg].ds_len, 2724 curaddr = dmamap->dm_segs[seg].ds_addr; 2725 seglen != 0; 2726 curaddr += curlen, seglen -= curlen, 2727 nexttx = WM_NEXTTX(sc, nexttx)) { 2728 curlen = seglen; 2729 2730 /* 2731 * So says the Linux driver: 2732 * Work around for premature descriptor 2733 * write-backs in TSO mode. Append a 2734 * 4-byte sentinel descriptor. 2735 */ 2736 if (use_tso && 2737 seg == dmamap->dm_nsegs - 1 && 2738 curlen > 8) 2739 curlen -= 4; 2740 2741 wm_set_dma_addr( 2742 &sc->sc_txdescs[nexttx].wtx_addr, 2743 curaddr); 2744 sc->sc_txdescs[nexttx].wtx_cmdlen = 2745 htole32(cksumcmd | curlen); 2746 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2747 0; 2748 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2749 cksumfields; 2750 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2751 lasttx = nexttx; 2752 2753 DPRINTF(WM_DEBUG_TX, 2754 ("%s: TX: desc %d: low %#" PRIx64 ", " 2755 "len %#04zx\n", 2756 device_xname(sc->sc_dev), nexttx, 2757 (uint64_t)curaddr, curlen)); 2758 } 2759 } 2760 2761 KASSERT(lasttx != -1); 2762 2763 /* 2764 * Set up the command byte on the last descriptor of 2765 * the packet. If we're in the interrupt delay window, 2766 * delay the interrupt. 2767 */ 2768 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2769 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2770 2771 /* 2772 * If VLANs are enabled and the packet has a VLAN tag, set 2773 * up the descriptor to encapsulate the packet for us. 2774 * 2775 * This is only valid on the last descriptor of the packet. 
2776 */ 2777 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2778 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2779 htole32(WTX_CMD_VLE); 2780 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2781 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2782 } 2783 2784 txs->txs_lastdesc = lasttx; 2785 2786 DPRINTF(WM_DEBUG_TX, 2787 ("%s: TX: desc %d: cmdlen 0x%08x\n", 2788 device_xname(sc->sc_dev), 2789 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2790 2791 /* Sync the descriptors we're using. */ 2792 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2793 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2794 2795 /* Give the packet to the chip. */ 2796 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2797 2798 DPRINTF(WM_DEBUG_TX, 2799 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 2800 2801 DPRINTF(WM_DEBUG_TX, 2802 ("%s: TX: finished transmitting packet, job %d\n", 2803 device_xname(sc->sc_dev), sc->sc_txsnext)); 2804 2805 /* Advance the tx pointer. */ 2806 sc->sc_txfree -= txs->txs_ndesc; 2807 sc->sc_txnext = nexttx; 2808 2809 sc->sc_txsfree--; 2810 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2811 2812 /* Pass the packet to any BPF listeners. */ 2813 bpf_mtap(ifp, m0); 2814 } 2815 2816 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2817 /* No more slots; notify upper layer. */ 2818 ifp->if_flags |= IFF_OACTIVE; 2819 } 2820 2821 if (sc->sc_txfree != ofree) { 2822 /* Set a watchdog timer in case the chip flakes out. */ 2823 ifp->if_timer = 5; 2824 } 2825 } 2826 2827 /* 2828 * wm_nq_tx_offload: 2829 * 2830 * Set up TCP/IP checksumming parameters for the 2831 * specified packet, for NEWQUEUE devices 2832 */ 2833 static int 2834 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, 2835 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) 2836 { 2837 struct mbuf *m0 = txs->txs_mbuf; 2838 struct m_tag *mtag; 2839 uint32_t vl_len, mssidx, cmdc; 2840 struct ether_header *eh; 2841 int offset, iphl; 2842 2843 /* 2844 * XXX It would be nice if the mbuf pkthdr had offset 2845 * fields for the protocol headers. 2846 */ 2847 *cmdlenp = 0; 2848 *fieldsp = 0; 2849 2850 eh = mtod(m0, struct ether_header *); 2851 switch (htons(eh->ether_type)) { 2852 case ETHERTYPE_IP: 2853 case ETHERTYPE_IPV6: 2854 offset = ETHER_HDR_LEN; 2855 break; 2856 2857 case ETHERTYPE_VLAN: 2858 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2859 break; 2860 2861 default: 2862 /* 2863 * Don't support this protocol or encapsulation. 
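 * With do_csum left false, the caller falls back to plain
 * legacy descriptors for this packet (see wm_nq_start()).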
2864 */ 2865 *do_csum = false; 2866 return 0; 2867 } 2868 *do_csum = true; 2869 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; 2870 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; 2871 2872 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); 2873 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); 2874 2875 if ((m0->m_pkthdr.csum_flags & 2876 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) { 2877 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 2878 } else { 2879 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 2880 } 2881 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); 2882 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); 2883 2884 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2885 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK) 2886 << NQTXC_VLLEN_VLAN_SHIFT); 2887 *cmdlenp |= NQTX_CMD_VLE; 2888 } 2889 2890 mssidx = 0; 2891 2892 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 2893 int hlen = offset + iphl; 2894 int tcp_hlen; 2895 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 2896 2897 if (__predict_false(m0->m_len < 2898 (hlen + sizeof(struct tcphdr)))) { 2899 /* 2900 * TCP/IP headers are not in the first mbuf; we need 2901 * to do this the slow and painful way. Let's just 2902 * hope this doesn't happen very often. 2903 */ 2904 struct tcphdr th; 2905 2906 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 2907 2908 m_copydata(m0, hlen, sizeof(th), &th); 2909 if (v4) { 2910 struct ip ip; 2911 2912 m_copydata(m0, offset, sizeof(ip), &ip); 2913 ip.ip_len = 0; 2914 m_copyback(m0, 2915 offset + offsetof(struct ip, ip_len), 2916 sizeof(ip.ip_len), &ip.ip_len); 2917 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 2918 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 2919 } else { 2920 struct ip6_hdr ip6; 2921 2922 m_copydata(m0, offset, sizeof(ip6), &ip6); 2923 ip6.ip6_plen = 0; 2924 m_copyback(m0, 2925 offset + offsetof(struct ip6_hdr, ip6_plen), 2926 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 2927 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 2928 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 2929 } 2930 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 2931 sizeof(th.th_sum), &th.th_sum); 2932 2933 tcp_hlen = th.th_off << 2; 2934 } else { 2935 /* 2936 * TCP/IP headers are in the first mbuf; we can do 2937 * this the easy way. 
2938 */ 2939 struct tcphdr *th; 2940 2941 if (v4) { 2942 struct ip *ip = 2943 (void *)(mtod(m0, char *) + offset); 2944 th = (void *)(mtod(m0, char *) + hlen); 2945 2946 ip->ip_len = 0; 2947 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 2948 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2949 } else { 2950 struct ip6_hdr *ip6 = 2951 (void *)(mtod(m0, char *) + offset); 2952 th = (void *)(mtod(m0, char *) + hlen); 2953 2954 ip6->ip6_plen = 0; 2955 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 2956 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 2957 } 2958 tcp_hlen = th->th_off << 2; 2959 } 2960 hlen += tcp_hlen; 2961 *cmdlenp |= NQTX_CMD_TSE; 2962 2963 if (v4) { 2964 WM_EVCNT_INCR(&sc->sc_ev_txtso); 2965 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; 2966 } else { 2967 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 2968 *fieldsp |= NQTXD_FIELDS_TUXSM; 2969 } 2970 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); 2971 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 2972 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); 2973 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); 2974 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); 2975 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); 2976 } else { 2977 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); 2978 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 2979 } 2980 2981 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2982 *fieldsp |= NQTXD_FIELDS_IXSM; 2983 cmdc |= NQTXC_CMD_IP4; 2984 } 2985 2986 if (m0->m_pkthdr.csum_flags & 2987 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 2988 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 2989 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 2990 cmdc |= NQTXC_CMD_TCP; 2991 } else { 2992 cmdc |= NQTXC_CMD_UDP; 2993 } 2994 cmdc |= NQTXC_CMD_IP4; 2995 *fieldsp |= NQTXD_FIELDS_TUXSM; 2996 } 2997 if (m0->m_pkthdr.csum_flags & 2998 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 2999 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 3000 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 3001 cmdc |= NQTXC_CMD_TCP; 3002 } else { 3003 cmdc |= NQTXC_CMD_UDP; 3004 } 3005 cmdc |= NQTXC_CMD_IP6; 3006 *fieldsp |= NQTXD_FIELDS_TUXSM; 3007 } 3008 3009 /* Fill in the context descriptor. 
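 *
 * (Worked example, an untagged IPv4/TCP frame with MSS 1460 and a
 * 20-byte TCP header: vl_len packs MACLEN = 14 and IPLEN = 20,
 * mssidx packs MSS = 1460 and L4LEN = 20, and cmdc carries
 * NQTXC_CMD_IP4 | NQTXC_CMD_TCP; the field layout is given by the
 * NQTXC_* shift/mask macros in if_wmreg.h.)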
*/
3010 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3011 htole32(vl_len);
3012 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3013 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3014 htole32(cmdc);
3015 sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3016 htole32(mssidx);
3017 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3018 DPRINTF(WM_DEBUG_TX,
3019 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3020 sc->sc_txnext, 0, vl_len));
3021 DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3022 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3023 txs->txs_ndesc++;
3024 return 0;
3025 }
3026
3027 /*
3028 * wm_nq_start: [ifnet interface function]
3029 *
3030 * Start packet transmission on the interface for NEWQUEUE devices.
3031 */
3032 static void
3033 wm_nq_start(struct ifnet *ifp)
3034 {
3035 struct wm_softc *sc = ifp->if_softc;
3036 struct mbuf *m0;
3037 struct m_tag *mtag;
3038 struct wm_txsoft *txs;
3039 bus_dmamap_t dmamap;
3040 int error, nexttx, lasttx = -1, seg, segs_needed;
3041 bool do_csum, sent;
3042
3043 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3044 return;
3045
3046 sent = false;
3047
3048 /*
3049 * Loop through the send queue, setting up transmit descriptors
3050 * until we drain the queue, or use up all available transmit
3051 * descriptors.
3052 */
3053 for (;;) {
3054 /* Grab a packet off the queue. */
3055 IFQ_POLL(&ifp->if_snd, m0);
3056 if (m0 == NULL)
3057 break;
3058
3059 DPRINTF(WM_DEBUG_TX,
3060 ("%s: TX: have packet to transmit: %p\n",
3061 device_xname(sc->sc_dev), m0));
3062
3063 /* Get a work queue entry. */
3064 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3065 wm_txintr(sc);
3066 if (sc->sc_txsfree == 0) {
3067 DPRINTF(WM_DEBUG_TX,
3068 ("%s: TX: no free job descriptors\n",
3069 device_xname(sc->sc_dev)));
3070 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3071 break;
3072 }
3073 }
3074
3075 txs = &sc->sc_txsoft[sc->sc_txsnext];
3076 dmamap = txs->txs_dmamap;
3077
3078 /*
3079 * Load the DMA map. If this fails, the packet either
3080 * didn't fit in the allotted number of segments, or we
3081 * were short on resources. For the too-many-segments
3082 * case, we simply report an error and drop the packet,
3083 * since we can't sanely copy a jumbo packet to a single
3084 * buffer.
3085 */
3086 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3087 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3088 if (error) {
3089 if (error == EFBIG) {
3090 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3091 log(LOG_ERR, "%s: Tx packet consumes too many "
3092 "DMA segments, dropping...\n",
3093 device_xname(sc->sc_dev));
3094 IFQ_DEQUEUE(&ifp->if_snd, m0);
3095 wm_dump_mbuf_chain(sc, m0);
3096 m_freem(m0);
3097 continue;
3098 }
3099 /*
3100 * Short on resources, just stop for now.
3101 */
3102 DPRINTF(WM_DEBUG_TX,
3103 ("%s: TX: dmamap load failed: %d\n",
3104 device_xname(sc->sc_dev), error));
3105 break;
3106 }
3107
3108 segs_needed = dmamap->dm_nsegs;
3109
3110 /*
3111 * Ensure we have enough descriptors free to describe
3112 * the packet. Note, we always reserve one descriptor
3113 * at the end of the ring due to the semantics of the
3114 * TDT register, plus one more in the event we need
3115 * to load offload context.
3116 */
3117 if (segs_needed > sc->sc_txfree - 2) {
3118 /*
3119 * Not enough free descriptors to transmit this
3120 * packet. We haven't committed anything yet,
3121 * so just unload the DMA map, put the packet
3122 * back on the queue, and punt.
Notify the upper 3123 * layer that there are no more slots left. 3124 */ 3125 DPRINTF(WM_DEBUG_TX, 3126 ("%s: TX: need %d (%d) descriptors, have %d\n", 3127 device_xname(sc->sc_dev), dmamap->dm_nsegs, 3128 segs_needed, sc->sc_txfree - 1)); 3129 ifp->if_flags |= IFF_OACTIVE; 3130 bus_dmamap_unload(sc->sc_dmat, dmamap); 3131 WM_EVCNT_INCR(&sc->sc_ev_txdstall); 3132 break; 3133 } 3134 3135 IFQ_DEQUEUE(&ifp->if_snd, m0); 3136 3137 /* 3138 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 3139 */ 3140 3141 DPRINTF(WM_DEBUG_TX, 3142 ("%s: TX: packet has %d (%d) DMA segments\n", 3143 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 3144 3145 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 3146 3147 /* 3148 * Store a pointer to the packet so that we can free it 3149 * later. 3150 * 3151 * Initially, we consider the number of descriptors the 3152 * packet uses the number of DMA segments. This may be 3153 * incremented by 1 if we do checksum offload (a descriptor 3154 * is used to set the checksum context). 3155 */ 3156 txs->txs_mbuf = m0; 3157 txs->txs_firstdesc = sc->sc_txnext; 3158 txs->txs_ndesc = segs_needed; 3159 3160 /* Set up offload parameters for this packet. */ 3161 uint32_t cmdlen, fields, dcmdlen; 3162 if (m0->m_pkthdr.csum_flags & 3163 (M_CSUM_TSOv4|M_CSUM_TSOv6| 3164 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 3165 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 3166 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields, 3167 &do_csum) != 0) { 3168 /* Error message already displayed. */ 3169 bus_dmamap_unload(sc->sc_dmat, dmamap); 3170 continue; 3171 } 3172 } else { 3173 do_csum = false; 3174 cmdlen = 0; 3175 fields = 0; 3176 } 3177 3178 /* Sync the DMA map. */ 3179 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 3180 BUS_DMASYNC_PREWRITE); 3181 3182 /* 3183 * Initialize the first transmit descriptor. 3184 */ 3185 nexttx = sc->sc_txnext; 3186 if (!do_csum) { 3187 /* setup a legacy descriptor */ 3188 wm_set_dma_addr( 3189 &sc->sc_txdescs[nexttx].wtx_addr, 3190 dmamap->dm_segs[0].ds_addr); 3191 sc->sc_txdescs[nexttx].wtx_cmdlen = 3192 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); 3193 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0; 3194 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0; 3195 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != 3196 NULL) { 3197 sc->sc_txdescs[nexttx].wtx_cmdlen |= 3198 htole32(WTX_CMD_VLE); 3199 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 3200 htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 3201 } else { 3202 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 3203 } 3204 dcmdlen = 0; 3205 } else { 3206 /* setup an advanced data descriptor */ 3207 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr = 3208 htole64(dmamap->dm_segs[0].ds_addr); 3209 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); 3210 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen = 3211 htole32(dmamap->dm_segs[0].ds_len | cmdlen ); 3212 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 3213 htole32(fields); 3214 DPRINTF(WM_DEBUG_TX, 3215 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", 3216 device_xname(sc->sc_dev), nexttx, 3217 (uint64_t)dmamap->dm_segs[0].ds_addr)); 3218 DPRINTF(WM_DEBUG_TX, 3219 ("\t 0x%08x%08x\n", fields, 3220 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); 3221 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; 3222 } 3223 3224 lasttx = nexttx; 3225 nexttx = WM_NEXTTX(sc, nexttx); 3226 /* 3227 * fill in the next descriptors. 
The legacy and advanced
3228 * descriptor formats are laid out the same from here on.
3229 */
3230 for (seg = 1; seg < dmamap->dm_nsegs;
3231 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3232 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3233 htole64(dmamap->dm_segs[seg].ds_addr);
3234 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3235 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3236 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3237 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3238 lasttx = nexttx;
3239
3240 DPRINTF(WM_DEBUG_TX,
3241 ("%s: TX: desc %d: %#" PRIx64 ", "
3242 "len %#04zx\n",
3243 device_xname(sc->sc_dev), nexttx,
3244 (uint64_t)dmamap->dm_segs[seg].ds_addr,
3245 dmamap->dm_segs[seg].ds_len));
3246 }
3247
3248 KASSERT(lasttx != -1);
3249
3250 /*
3251 * Set up the command byte on the last descriptor of
3252 * the packet. If we're in the interrupt delay window,
3253 * delay the interrupt.
3254 */
3255 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3256 (NQTX_CMD_EOP | NQTX_CMD_RS));
3257 sc->sc_txdescs[lasttx].wtx_cmdlen |=
3258 htole32(WTX_CMD_EOP | WTX_CMD_RS);
3259
3260 txs->txs_lastdesc = lasttx;
3261
3262 DPRINTF(WM_DEBUG_TX,
3263 ("%s: TX: desc %d: cmdlen 0x%08x\n",
3264 device_xname(sc->sc_dev),
3265 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3266
3267 /* Sync the descriptors we're using. */
3268 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3269 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3270
3271 /* Give the packet to the chip. */
3272 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3273 sent = true;
3274
3275 DPRINTF(WM_DEBUG_TX,
3276 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3277
3278 DPRINTF(WM_DEBUG_TX,
3279 ("%s: TX: finished transmitting packet, job %d\n",
3280 device_xname(sc->sc_dev), sc->sc_txsnext));
3281
3282 /* Advance the tx pointer. */
3283 sc->sc_txfree -= txs->txs_ndesc;
3284 sc->sc_txnext = nexttx;
3285
3286 sc->sc_txsfree--;
3287 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3288
3289 /* Pass the packet to any BPF listeners. */
3290 bpf_mtap(ifp, m0);
3291 }
3292
3293 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3294 /* No more slots; notify upper layer. */
3295 ifp->if_flags |= IFF_OACTIVE;
3296 }
3297
3298 if (sent) {
3299 /* Set a watchdog timer in case the chip flakes out. */
3300 ifp->if_timer = 5;
3301 }
3302 }
3303
3304 /*
3305 * wm_watchdog: [ifnet interface function]
3306 *
3307 * Watchdog timer handler.
3308 */
3309 static void
3310 wm_watchdog(struct ifnet *ifp)
3311 {
3312 struct wm_softc *sc = ifp->if_softc;
3313
3314 /*
3315 * Since we're using delayed interrupts, sweep up
3316 * before we report an error.
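 * A completion whose TXDW interrupt is still being delayed can
 * otherwise masquerade as a hang; only if descriptors remain
 * outstanding after the sweep do we treat it as a real timeout
 * and reinitialize the chip.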
3317 */ 3318 wm_txintr(sc); 3319 3320 if (sc->sc_txfree != WM_NTXDESC(sc)) { 3321 #ifdef WM_DEBUG 3322 int i, j; 3323 struct wm_txsoft *txs; 3324 #endif 3325 log(LOG_ERR, 3326 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 3327 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, 3328 sc->sc_txnext); 3329 ifp->if_oerrors++; 3330 #ifdef WM_DEBUG 3331 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ; 3332 i = WM_NEXTTXS(sc, i)) { 3333 txs = &sc->sc_txsoft[i]; 3334 printf("txs %d tx %d -> %d\n", 3335 i, txs->txs_firstdesc, txs->txs_lastdesc); 3336 for (j = txs->txs_firstdesc; ; 3337 j = WM_NEXTTX(sc, j)) { 3338 printf("\tdesc %d: 0x%" PRIx64 "\n", j, 3339 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr); 3340 printf("\t %#08x%08x\n", 3341 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields, 3342 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen); 3343 if (j == txs->txs_lastdesc) 3344 break; 3345 } 3346 } 3347 #endif 3348 /* Reset the interface. */ 3349 (void) wm_init(ifp); 3350 } 3351 3352 /* Try to get more packets going. */ 3353 ifp->if_start(ifp); 3354 } 3355 3356 static int 3357 wm_ifflags_cb(struct ethercom *ec) 3358 { 3359 struct ifnet *ifp = &ec->ec_if; 3360 struct wm_softc *sc = ifp->if_softc; 3361 int change = ifp->if_flags ^ sc->sc_if_flags; 3362 3363 if (change != 0) 3364 sc->sc_if_flags = ifp->if_flags; 3365 3366 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 3367 return ENETRESET; 3368 3369 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3370 wm_set_filter(sc); 3371 3372 wm_set_vlan(sc); 3373 3374 return 0; 3375 } 3376 3377 /* 3378 * wm_ioctl: [ifnet interface function] 3379 * 3380 * Handle control requests from the operator. 3381 */ 3382 static int 3383 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3384 { 3385 struct wm_softc *sc = ifp->if_softc; 3386 struct ifreq *ifr = (struct ifreq *) data; 3387 struct ifaddr *ifa = (struct ifaddr *)data; 3388 struct sockaddr_dl *sdl; 3389 int s, error; 3390 3391 s = splnet(); 3392 3393 switch (cmd) { 3394 case SIOCSIFMEDIA: 3395 case SIOCGIFMEDIA: 3396 /* Flow control requires full-duplex mode. */ 3397 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3398 (ifr->ifr_media & IFM_FDX) == 0) 3399 ifr->ifr_media &= ~IFM_ETH_FMASK; 3400 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3401 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3402 /* We can do both TXPAUSE and RXPAUSE. */ 3403 ifr->ifr_media |= 3404 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3405 } 3406 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3407 } 3408 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 3409 break; 3410 case SIOCINITIFADDR: 3411 if (ifa->ifa_addr->sa_family == AF_LINK) { 3412 sdl = satosdl(ifp->if_dl->ifa_addr); 3413 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, 3414 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); 3415 /* unicast address is first multicast entry */ 3416 wm_set_filter(sc); 3417 error = 0; 3418 break; 3419 } 3420 /*FALLTHROUGH*/ 3421 default: 3422 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 3423 break; 3424 3425 error = 0; 3426 3427 if (cmd == SIOCSIFCAP) 3428 error = (*ifp->if_init)(ifp); 3429 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 3430 ; 3431 else if (ifp->if_flags & IFF_RUNNING) { 3432 /* 3433 * Multicast list has changed; set the hardware filter 3434 * accordingly. 3435 */ 3436 wm_set_filter(sc); 3437 } 3438 break; 3439 } 3440 3441 /* Try to get more packets going. */ 3442 ifp->if_start(ifp); 3443 3444 splx(s); 3445 return error; 3446 } 3447 3448 /* 3449 * wm_intr: 3450 * 3451 * Interrupt service routine. 
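 *
 * (On these chips ICR is essentially read-to-clear: each CSR_READ
 * of WMREG_ICR returns the pending causes and acknowledges them,
 * which is why the loop below just keeps re-reading until none of
 * the causes we enabled in sc_icr are left set.)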
3452 */ 3453 static int 3454 wm_intr(void *arg) 3455 { 3456 struct wm_softc *sc = arg; 3457 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3458 uint32_t icr; 3459 int handled = 0; 3460 3461 while (1 /* CONSTCOND */) { 3462 icr = CSR_READ(sc, WMREG_ICR); 3463 if ((icr & sc->sc_icr) == 0) 3464 break; 3465 rnd_add_uint32(&sc->rnd_source, icr); 3466 3467 handled = 1; 3468 3469 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 3470 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 3471 DPRINTF(WM_DEBUG_RX, 3472 ("%s: RX: got Rx intr 0x%08x\n", 3473 device_xname(sc->sc_dev), 3474 icr & (ICR_RXDMT0|ICR_RXT0))); 3475 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 3476 } 3477 #endif 3478 wm_rxintr(sc); 3479 3480 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 3481 if (icr & ICR_TXDW) { 3482 DPRINTF(WM_DEBUG_TX, 3483 ("%s: TX: got TXDW interrupt\n", 3484 device_xname(sc->sc_dev))); 3485 WM_EVCNT_INCR(&sc->sc_ev_txdw); 3486 } 3487 #endif 3488 wm_txintr(sc); 3489 3490 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 3491 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 3492 wm_linkintr(sc, icr); 3493 } 3494 3495 if (icr & ICR_RXO) { 3496 #if defined(WM_DEBUG) 3497 log(LOG_WARNING, "%s: Receive overrun\n", 3498 device_xname(sc->sc_dev)); 3499 #endif /* defined(WM_DEBUG) */ 3500 } 3501 } 3502 3503 if (handled) { 3504 /* Try to get more packets going. */ 3505 ifp->if_start(ifp); 3506 } 3507 3508 return handled; 3509 } 3510 3511 /* 3512 * wm_txintr: 3513 * 3514 * Helper; handle transmit interrupts. 3515 */ 3516 static void 3517 wm_txintr(struct wm_softc *sc) 3518 { 3519 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3520 struct wm_txsoft *txs; 3521 uint8_t status; 3522 int i; 3523 3524 ifp->if_flags &= ~IFF_OACTIVE; 3525 3526 /* 3527 * Go through the Tx list and free mbufs for those 3528 * frames which have been transmitted. 3529 */ 3530 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 3531 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 3532 txs = &sc->sc_txsoft[i]; 3533 3534 DPRINTF(WM_DEBUG_TX, 3535 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i)); 3536 3537 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 3538 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3539 3540 status = 3541 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 3542 if ((status & WTX_ST_DD) == 0) { 3543 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 3544 BUS_DMASYNC_PREREAD); 3545 break; 3546 } 3547 3548 DPRINTF(WM_DEBUG_TX, 3549 ("%s: TX: job %d done: descs %d..%d\n", 3550 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 3551 txs->txs_lastdesc)); 3552 3553 /* 3554 * XXX We should probably be using the statistics 3555 * XXX registers, but I don't know if they exist 3556 * XXX on chips before the i82544. 3557 */ 3558 3559 #ifdef WM_EVENT_COUNTERS 3560 if (status & WTX_ST_TU) 3561 WM_EVCNT_INCR(&sc->sc_ev_tu); 3562 #endif /* WM_EVENT_COUNTERS */ 3563 3564 if (status & (WTX_ST_EC|WTX_ST_LC)) { 3565 ifp->if_oerrors++; 3566 if (status & WTX_ST_LC) 3567 log(LOG_WARNING, "%s: late collision\n", 3568 device_xname(sc->sc_dev)); 3569 else if (status & WTX_ST_EC) { 3570 ifp->if_collisions += 16; 3571 log(LOG_WARNING, "%s: excessive collisions\n", 3572 device_xname(sc->sc_dev)); 3573 } 3574 } else 3575 ifp->if_opackets++; 3576 3577 sc->sc_txfree += txs->txs_ndesc; 3578 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 3579 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3580 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3581 m_freem(txs->txs_mbuf); 3582 txs->txs_mbuf = NULL; 3583 } 3584 3585 /* Update the dirty transmit buffer pointer. 
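 * Jobs txsdirty .. txsnext-1 are the ones still owned by the
 * chip; the loop above advanced txsdirty past every job whose
 * last descriptor came back with WTX_ST_DD set.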
*/
3586 sc->sc_txsdirty = i;
3587 DPRINTF(WM_DEBUG_TX,
3588 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3589
3590 /*
3591 * If there are no more pending transmissions, cancel the watchdog
3592 * timer.
3593 */
3594 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3595 ifp->if_timer = 0;
3596 }
3597
3598 /*
3599 * wm_rxintr:
3600 *
3601 * Helper; handle receive interrupts.
3602 */
3603 static void
3604 wm_rxintr(struct wm_softc *sc)
3605 {
3606 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3607 struct wm_rxsoft *rxs;
3608 struct mbuf *m;
3609 int i, len;
3610 uint8_t status, errors;
3611 uint16_t vlantag;
3612
3613 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3614 rxs = &sc->sc_rxsoft[i];
3615
3616 DPRINTF(WM_DEBUG_RX,
3617 ("%s: RX: checking descriptor %d\n",
3618 device_xname(sc->sc_dev), i));
3619
3620 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3621
3622 status = sc->sc_rxdescs[i].wrx_status;
3623 errors = sc->sc_rxdescs[i].wrx_errors;
3624 len = le16toh(sc->sc_rxdescs[i].wrx_len);
3625 vlantag = sc->sc_rxdescs[i].wrx_special;
3626
3627 if ((status & WRX_ST_DD) == 0) {
3628 /*
3629 * We have processed all of the receive descriptors.
3630 */
3631 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3632 break;
3633 }
3634
3635 if (__predict_false(sc->sc_rxdiscard)) {
3636 DPRINTF(WM_DEBUG_RX,
3637 ("%s: RX: discarding contents of descriptor %d\n",
3638 device_xname(sc->sc_dev), i));
3639 WM_INIT_RXDESC(sc, i);
3640 if (status & WRX_ST_EOP) {
3641 /* Reset our state. */
3642 DPRINTF(WM_DEBUG_RX,
3643 ("%s: RX: resetting rxdiscard -> 0\n",
3644 device_xname(sc->sc_dev)));
3645 sc->sc_rxdiscard = 0;
3646 }
3647 continue;
3648 }
3649
3650 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3651 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3652
3653 m = rxs->rxs_mbuf;
3654
3655 /*
3656 * Add a new receive buffer to the ring, unless of
3657 * course the length is zero. Treat the latter as a
3658 * failed mapping.
3659 */
3660 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3661 /*
3662 * Failed, throw away what we've done so
3663 * far, and discard the rest of the packet.
3664 */
3665 ifp->if_ierrors++;
3666 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3667 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3668 WM_INIT_RXDESC(sc, i);
3669 if ((status & WRX_ST_EOP) == 0)
3670 sc->sc_rxdiscard = 1;
3671 if (sc->sc_rxhead != NULL)
3672 m_freem(sc->sc_rxhead);
3673 WM_RXCHAIN_RESET(sc);
3674 DPRINTF(WM_DEBUG_RX,
3675 ("%s: RX: Rx buffer allocation failed, "
3676 "dropping packet%s\n", device_xname(sc->sc_dev),
3677 sc->sc_rxdiscard ? " (discard)" : ""));
3678 continue;
3679 }
3680
3681 m->m_len = len;
3682 sc->sc_rxlen += len;
3683 DPRINTF(WM_DEBUG_RX,
3684 ("%s: RX: buffer at %p len %d\n",
3685 device_xname(sc->sc_dev), m->m_data, len));
3686
3687 /*
3688 * If this is not the end of the packet, keep
3689 * looking.
3690 */
3691 if ((status & WRX_ST_EOP) == 0) {
3692 WM_RXCHAIN_LINK(sc, m);
3693 DPRINTF(WM_DEBUG_RX,
3694 ("%s: RX: not yet EOP, rxlen -> %d\n",
3695 device_xname(sc->sc_dev), sc->sc_rxlen));
3696 continue;
3697 }
3698
3699 /*
3700 * Okay, we have the entire packet now. The chip
3701 * delivers the FCS with the packet on everything except
3702 * the I350 and I21[01] (not every chip can be configured
3703 * to strip it), so normally we have to trim it ourselves.
3704 * We may also need to shorten the previous mbuf in the
3705 * chain if the current mbuf is too short to cover the FCS.
3706 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
3707 * is always set on the I350, so there the CRC has already been stripped and we must not trim it.
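*
* Worked example (illustrative): with the 4-byte FCS, if the final
* mbuf holds only 2 bytes then both are CRC, and the other 2 CRC
* bytes sit at the end of the previous mbuf; the code below zeroes
* the final mbuf and shortens sc_rxtail by ETHER_CRC_LEN - m_len = 2
* before computing len = sc_rxlen - ETHER_CRC_LEN.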
3708 */ 3709 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210) 3710 && (sc->sc_type != WM_T_I211)) { 3711 if (m->m_len < ETHER_CRC_LEN) { 3712 sc->sc_rxtail->m_len 3713 -= (ETHER_CRC_LEN - m->m_len); 3714 m->m_len = 0; 3715 } else 3716 m->m_len -= ETHER_CRC_LEN; 3717 len = sc->sc_rxlen - ETHER_CRC_LEN; 3718 } else 3719 len = sc->sc_rxlen; 3720 3721 WM_RXCHAIN_LINK(sc, m); 3722 3723 *sc->sc_rxtailp = NULL; 3724 m = sc->sc_rxhead; 3725 3726 WM_RXCHAIN_RESET(sc); 3727 3728 DPRINTF(WM_DEBUG_RX, 3729 ("%s: RX: have entire packet, len -> %d\n", 3730 device_xname(sc->sc_dev), len)); 3731 3732 /* 3733 * If an error occurred, update stats and drop the packet. 3734 */ 3735 if (errors & 3736 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 3737 if (errors & WRX_ER_SE) 3738 log(LOG_WARNING, "%s: symbol error\n", 3739 device_xname(sc->sc_dev)); 3740 else if (errors & WRX_ER_SEQ) 3741 log(LOG_WARNING, "%s: receive sequence error\n", 3742 device_xname(sc->sc_dev)); 3743 else if (errors & WRX_ER_CE) 3744 log(LOG_WARNING, "%s: CRC error\n", 3745 device_xname(sc->sc_dev)); 3746 m_freem(m); 3747 continue; 3748 } 3749 3750 /* 3751 * No errors. Receive the packet. 3752 */ 3753 m->m_pkthdr.rcvif = ifp; 3754 m->m_pkthdr.len = len; 3755 3756 /* 3757 * If VLANs are enabled, VLAN packets have been unwrapped 3758 * for us. Associate the tag with the packet. 3759 */ 3760 if ((status & WRX_ST_VP) != 0) { 3761 VLAN_INPUT_TAG(ifp, m, 3762 le16toh(vlantag), 3763 continue); 3764 } 3765 3766 /* 3767 * Set up checksum info for this packet. 3768 */ 3769 if ((status & WRX_ST_IXSM) == 0) { 3770 if (status & WRX_ST_IPCS) { 3771 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 3772 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 3773 if (errors & WRX_ER_IPE) 3774 m->m_pkthdr.csum_flags |= 3775 M_CSUM_IPv4_BAD; 3776 } 3777 if (status & WRX_ST_TCPCS) { 3778 /* 3779 * Note: we don't know if this was TCP or UDP, 3780 * so we just set both bits, and expect the 3781 * upper layers to deal. 3782 */ 3783 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 3784 m->m_pkthdr.csum_flags |= 3785 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 3786 M_CSUM_TCPv6 | M_CSUM_UDPv6; 3787 if (errors & WRX_ER_TCPE) 3788 m->m_pkthdr.csum_flags |= 3789 M_CSUM_TCP_UDP_BAD; 3790 } 3791 } 3792 3793 ifp->if_ipackets++; 3794 3795 /* Pass this up to any BPF listeners. */ 3796 bpf_mtap(ifp, m); 3797 3798 /* Pass it on. */ 3799 (*ifp->if_input)(ifp, m); 3800 } 3801 3802 /* Update the receive pointer. */ 3803 sc->sc_rxptr = i; 3804 3805 DPRINTF(WM_DEBUG_RX, 3806 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); 3807 } 3808 3809 /* 3810 * wm_linkintr_gmii: 3811 * 3812 * Helper; handle link interrupts for GMII. 3813 */ 3814 static void 3815 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) 3816 { 3817 3818 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 3819 __func__)); 3820 3821 if (icr & ICR_LSC) { 3822 DPRINTF(WM_DEBUG_LINK, 3823 ("%s: LINK: LSC -> mii_pollstat\n", 3824 device_xname(sc->sc_dev))); 3825 mii_pollstat(&sc->sc_mii); 3826 if (sc->sc_type == WM_T_82543) { 3827 int miistatus, active; 3828 3829 /* 3830 * With 82543, we need to force speed and 3831 * duplex on the MAC equal to what the PHY 3832 * speed and duplex configuration is. 
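For example, when the PHY
 * resolves to 100baseTX-FDX, the switch below rewrites
 * CTRL with CTRL_SPEED_100 | CTRL_FD.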
3833 */
3834 miistatus = sc->sc_mii.mii_media_status;
3835
3836 if (miistatus & IFM_ACTIVE) {
3837 active = sc->sc_mii.mii_media_active;
3838 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3839 switch (IFM_SUBTYPE(active)) {
3840 case IFM_10_T:
3841 sc->sc_ctrl |= CTRL_SPEED_10;
3842 break;
3843 case IFM_100_TX:
3844 sc->sc_ctrl |= CTRL_SPEED_100;
3845 break;
3846 case IFM_1000_T:
3847 sc->sc_ctrl |= CTRL_SPEED_1000;
3848 break;
3849 default:
3850 /*
3851 * Fiber?
3852 * Should not enter here.
3853 */
3854 printf("unknown media (%x)\n",
3855 active);
3856 break;
3857 }
3858 if (active & IFM_FDX)
3859 sc->sc_ctrl |= CTRL_FD;
3860 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3861 }
3862 } else if ((sc->sc_type == WM_T_ICH8)
3863 && (sc->sc_phytype == WMPHY_IGP_3)) {
3864 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3865 } else if (sc->sc_type == WM_T_PCH) {
3866 wm_k1_gig_workaround_hv(sc,
3867 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3868 }
3869
3870 if ((sc->sc_phytype == WMPHY_82578)
3871 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3872 == IFM_1000_T)) {
3873
3874 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3875 delay(200*1000); /* XXX too big */
3876
3877 /* Link stall fix for link up */
3878 wm_gmii_hv_writereg(sc->sc_dev, 1,
3879 HV_MUX_DATA_CTRL,
3880 HV_MUX_DATA_CTRL_GEN_TO_MAC
3881 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3882 wm_gmii_hv_writereg(sc->sc_dev, 1,
3883 HV_MUX_DATA_CTRL,
3884 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3885 }
3886 }
3887 } else if (icr & ICR_RXSEQ) {
3888 DPRINTF(WM_DEBUG_LINK,
3889 ("%s: LINK: Receive sequence error\n",
3890 device_xname(sc->sc_dev)));
3891 }
3892 }
3893
3894 /*
3895 * wm_linkintr_tbi:
3896 *
3897 * Helper; handle link interrupts for TBI mode.
3898 */
3899 static void
3900 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3901 {
3902 uint32_t status;
3903
3904 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3905 __func__));
3906
3907 status = CSR_READ(sc, WMREG_STATUS);
3908 if (icr & ICR_LSC) {
3909 if (status & STATUS_LU) {
3910 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3911 device_xname(sc->sc_dev),
3912 (status & STATUS_FD) ? "FDX" : "HDX"));
3913 /*
3914 * NOTE: the hardware updates TFCE and RFCE in CTRL
3915 * automatically, so re-read CTRL to refresh sc->sc_ctrl.
3916 */
3917
3918 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3919 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3920 sc->sc_fcrtl &= ~FCRTL_XONE;
3921 if (status & STATUS_FD)
3922 sc->sc_tctl |=
3923 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3924 else
3925 sc->sc_tctl |=
3926 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3927 if (sc->sc_ctrl & CTRL_TFCE)
3928 sc->sc_fcrtl |= FCRTL_XONE;
3929 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3930 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3931 WMREG_OLD_FCRTL : WMREG_FCRTL,
3932 sc->sc_fcrtl);
3933 sc->sc_tbi_linkup = 1;
3934 } else {
3935 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3936 device_xname(sc->sc_dev)));
3937 sc->sc_tbi_linkup = 0;
3938 }
3939 wm_tbi_set_linkled(sc);
3940 } else if (icr & ICR_RXCFG) {
3941 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3942 device_xname(sc->sc_dev)));
3943 sc->sc_tbi_nrxcfg++;
3944 wm_check_for_link(sc);
3945 } else if (icr & ICR_RXSEQ) {
3946 DPRINTF(WM_DEBUG_LINK,
3947 ("%s: LINK: Receive sequence error\n",
3948 device_xname(sc->sc_dev)));
3949 }
3950 }
3951
3952 /*
3953 * wm_linkintr:
3954 *
3955 * Helper; handle link interrupts.
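The same ICR bits mean
 * different things depending on whether the MAC is attached to a
 * copper PHY (GMII/MII) or runs 1000BASE-X (TBI), so we simply
 * dispatch on WM_F_HAS_MII.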
3956 */
3957 static void
3958 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3959 {
3960
3961 if (sc->sc_flags & WM_F_HAS_MII)
3962 wm_linkintr_gmii(sc, icr);
3963 else
3964 wm_linkintr_tbi(sc, icr);
3965 }
3966
3967 /*
3968 * wm_tick:
3969 *
3970 * One second timer, used to check link status, sweep up
3971 * completed transmit jobs, etc.
3972 */
3973 static void
3974 wm_tick(void *arg)
3975 {
3976 struct wm_softc *sc = arg;
3977 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3978 int s;
3979
3980 s = splnet();
3981
3982 if (sc->sc_type >= WM_T_82542_2_1) {
3983 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3984 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3985 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3986 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3987 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3988 }
3989
3990 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3991 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3992 + CSR_READ(sc, WMREG_CRCERRS)
3993 + CSR_READ(sc, WMREG_ALGNERRC)
3994 + CSR_READ(sc, WMREG_SYMERRC)
3995 + CSR_READ(sc, WMREG_RXERRC)
3996 + CSR_READ(sc, WMREG_SEC)
3997 + CSR_READ(sc, WMREG_CEXTERR)
3998 + CSR_READ(sc, WMREG_RLEC);
3999 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4000
4001 if (sc->sc_flags & WM_F_HAS_MII)
4002 mii_tick(&sc->sc_mii);
4003 else
4004 wm_tbi_check_link(sc);
4005
4006 splx(s);
4007
4008 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4009 }
4010
4011 /*
4012 * wm_reset:
4013 *
4014 * Reset the i82542 chip.
4015 */
4016 static void
4017 wm_reset(struct wm_softc *sc)
4018 {
4019 int phy_reset = 0;
4020 uint32_t reg, mask;
4021
4022 /*
4023 * Allocate on-chip memory according to the MTU size.
4024 * The Packet Buffer Allocation register must be written
4025 * before the chip is reset.
4026 */
4027 switch (sc->sc_type) {
4028 case WM_T_82547:
4029 case WM_T_82547_2:
4030 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4031 PBA_22K : PBA_30K;
4032 sc->sc_txfifo_head = 0;
4033 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4034 sc->sc_txfifo_size =
4035 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4036 sc->sc_txfifo_stall = 0;
4037 break;
4038 case WM_T_82571:
4039 case WM_T_82572:
4040 case WM_T_82575: /* XXX need special handling for jumbo frames */
4041 case WM_T_I350:
4042 case WM_T_80003:
4043 sc->sc_pba = PBA_32K;
4044 break;
4045 case WM_T_82580:
4046 case WM_T_82580ER:
4047 sc->sc_pba = PBA_35K;
4048 break;
4049 case WM_T_I210:
4050 case WM_T_I211:
4051 sc->sc_pba = PBA_34K;
4052 break;
4053 case WM_T_82576:
4054 sc->sc_pba = PBA_64K;
4055 break;
4056 case WM_T_82573:
4057 sc->sc_pba = PBA_12K;
4058 break;
4059 case WM_T_82574:
4060 case WM_T_82583:
4061 sc->sc_pba = PBA_20K;
4062 break;
4063 case WM_T_ICH8:
4064 sc->sc_pba = PBA_8K;
4065 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4066 break;
4067 case WM_T_ICH9:
4068 case WM_T_ICH10:
4069 sc->sc_pba = PBA_10K;
4070 break;
4071 case WM_T_PCH:
4072 case WM_T_PCH2:
4073 case WM_T_PCH_LPT:
4074 sc->sc_pba = PBA_26K;
4075 break;
4076 default:
4077 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4078 PBA_40K : PBA_48K;
4079 break;
4080 }
4081 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4082
4083 /* Prevent the PCI-E bus from sticking */
4084 if (sc->sc_flags & WM_F_PCIE) {
4085 int timeout = 800;
4086
4087 sc->sc_ctrl |= CTRL_GIO_M_DIS;
4088 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4089
4090 while (timeout--) {
4091 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4092 == 0)
4093 break;
4094 delay(100);
4095 }
4096 }
4097
4098 /* Set the completion timeout for the interface */
4099 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4100 || (sc->sc_type == WM_T_I350))
4101 wm_set_pcie_completion_timeout(sc);
4102
4103 /* Disable all interrupts */
4104 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4105
4106 /* Stop the transmit and receive processes. */
4107 CSR_WRITE(sc, WMREG_RCTL, 0);
4108 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4109 sc->sc_rctl &= ~RCTL_EN;
4110
4111 /* XXX set_tbi_sbp_82543() */
4112
4113 delay(10*1000);
4114
4115 /* Must acquire the MDIO ownership before MAC reset */
4116 switch (sc->sc_type) {
4117 case WM_T_82573:
4118 case WM_T_82574:
4119 case WM_T_82583:
4120 wm_get_hw_semaphore_82573(sc);
4121 break;
4122 default:
4123 break;
4124 }
4125
4126 /*
4127 * 82541 Errata 29? & 82547 Errata 28?
4128 * See also the description of the PHY_RST bit in the CTRL register
4129 * in 8254x_GBe_SDM.pdf.
4130 */
4131 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4132 CSR_WRITE(sc, WMREG_CTRL,
4133 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4134 delay(5000);
4135 }
4136
4137 switch (sc->sc_type) {
4138 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4139 case WM_T_82541:
4140 case WM_T_82541_2:
4141 case WM_T_82547:
4142 case WM_T_82547_2:
4143 /*
4144 * On some chipsets, a reset through a memory-mapped write
4145 * cycle can cause the chip to reset before completing the
4146 * write cycle. This causes major headaches that can be
4147 * avoided by issuing the reset via indirect register writes
4148 * through I/O space.
4149 *
4150 * So, if we successfully mapped the I/O BAR at attach time,
4151 * use that. Otherwise, try our luck with a memory-mapped
4152 * reset.
4153 */
4154 if (sc->sc_flags & WM_F_IOH_VALID)
4155 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4156 else
4157 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4158 break;
4159 case WM_T_82545_3:
4160 case WM_T_82546_3:
4161 /* Use the shadow control register on these chips.
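Presumably this avoids the
 * same incomplete-write hazard as the I/O-space workaround above;
 * the shadow register is a second alias of CTRL that is safe to
 * write CTRL_RST through. (This rationale is an assumption, not
 * taken from the documentation.)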
*/ 4162 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 4163 break; 4164 case WM_T_80003: 4165 mask = swfwphysem[sc->sc_funcid]; 4166 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 4167 wm_get_swfw_semaphore(sc, mask); 4168 CSR_WRITE(sc, WMREG_CTRL, reg); 4169 wm_put_swfw_semaphore(sc, mask); 4170 break; 4171 case WM_T_ICH8: 4172 case WM_T_ICH9: 4173 case WM_T_ICH10: 4174 case WM_T_PCH: 4175 case WM_T_PCH2: 4176 case WM_T_PCH_LPT: 4177 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 4178 if (wm_check_reset_block(sc) == 0) { 4179 /* 4180 * Gate automatic PHY configuration by hardware on 4181 * non-managed 82579 4182 */ 4183 if ((sc->sc_type == WM_T_PCH2) 4184 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) 4185 != 0)) 4186 wm_gate_hw_phy_config_ich8lan(sc, 1); 4187 4188 4189 reg |= CTRL_PHY_RESET; 4190 phy_reset = 1; 4191 } 4192 wm_get_swfwhw_semaphore(sc); 4193 CSR_WRITE(sc, WMREG_CTRL, reg); 4194 delay(20*1000); 4195 wm_put_swfwhw_semaphore(sc); 4196 break; 4197 case WM_T_82542_2_0: 4198 case WM_T_82542_2_1: 4199 case WM_T_82543: 4200 case WM_T_82540: 4201 case WM_T_82545: 4202 case WM_T_82546: 4203 case WM_T_82571: 4204 case WM_T_82572: 4205 case WM_T_82573: 4206 case WM_T_82574: 4207 case WM_T_82575: 4208 case WM_T_82576: 4209 case WM_T_82580: 4210 case WM_T_82580ER: 4211 case WM_T_82583: 4212 case WM_T_I350: 4213 case WM_T_I210: 4214 case WM_T_I211: 4215 default: 4216 /* Everything else can safely use the documented method. */ 4217 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 4218 break; 4219 } 4220 4221 /* Must release the MDIO ownership after MAC reset */ 4222 switch (sc->sc_type) { 4223 case WM_T_82574: 4224 case WM_T_82583: 4225 wm_put_hw_semaphore_82573(sc); 4226 break; 4227 default: 4228 break; 4229 } 4230 4231 if (phy_reset != 0) 4232 wm_get_cfg_done(sc); 4233 4234 /* reload EEPROM */ 4235 switch (sc->sc_type) { 4236 case WM_T_82542_2_0: 4237 case WM_T_82542_2_1: 4238 case WM_T_82543: 4239 case WM_T_82544: 4240 delay(10); 4241 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 4242 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4243 delay(2000); 4244 break; 4245 case WM_T_82540: 4246 case WM_T_82545: 4247 case WM_T_82545_3: 4248 case WM_T_82546: 4249 case WM_T_82546_3: 4250 delay(5*1000); 4251 /* XXX Disable HW ARPs on ASF enabled adapters */ 4252 break; 4253 case WM_T_82541: 4254 case WM_T_82541_2: 4255 case WM_T_82547: 4256 case WM_T_82547_2: 4257 delay(20000); 4258 /* XXX Disable HW ARPs on ASF enabled adapters */ 4259 break; 4260 case WM_T_82571: 4261 case WM_T_82572: 4262 case WM_T_82573: 4263 case WM_T_82574: 4264 case WM_T_82583: 4265 if (sc->sc_flags & WM_F_EEPROM_FLASH) { 4266 delay(10); 4267 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 4268 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4269 } 4270 /* check EECD_EE_AUTORD */ 4271 wm_get_auto_rd_done(sc); 4272 /* 4273 * Phy configuration from NVM just starts after EECD_AUTO_RD 4274 * is set. 
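The extra 25ms delay below
 * for the 82573, 82574 and 82583 gives that PHY configuration
 * time to finish before we continue.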
4275 */
4276 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4277 || (sc->sc_type == WM_T_82583))
4278 delay(25*1000);
4279 break;
4280 case WM_T_82575:
4281 case WM_T_82576:
4282 case WM_T_82580:
4283 case WM_T_82580ER:
4284 case WM_T_I350:
4285 case WM_T_I210:
4286 case WM_T_I211:
4287 case WM_T_80003:
4288 /* check EECD_EE_AUTORD */
4289 wm_get_auto_rd_done(sc);
4290 break;
4291 case WM_T_ICH8:
4292 case WM_T_ICH9:
4293 case WM_T_ICH10:
4294 case WM_T_PCH:
4295 case WM_T_PCH2:
4296 case WM_T_PCH_LPT:
4297 break;
4298 default:
4299 panic("%s: unknown type\n", __func__);
4300 }
4301
4302 /* Check whether the EEPROM is present or not */
4303 switch (sc->sc_type) {
4304 case WM_T_82575:
4305 case WM_T_82576:
4306 #if 0 /* XXX */
4307 case WM_T_82580:
4308 case WM_T_82580ER:
4309 #endif
4310 case WM_T_I350:
4311 case WM_T_ICH8:
4312 case WM_T_ICH9:
4313 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4314 /* Not found */
4315 sc->sc_flags |= WM_F_EEPROM_INVALID;
4316 if ((sc->sc_type == WM_T_82575)
4317 || (sc->sc_type == WM_T_82576)
4318 || (sc->sc_type == WM_T_82580)
4319 || (sc->sc_type == WM_T_82580ER)
4320 || (sc->sc_type == WM_T_I350))
4321 wm_reset_init_script_82575(sc);
4322 }
4323 break;
4324 default:
4325 break;
4326 }
4327
4328 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4329 || (sc->sc_type == WM_T_I350)) {
4330 /* clear global device reset status bit */
4331 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4332 }
4333
4334 /* Clear any pending interrupt events. */
4335 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4336 reg = CSR_READ(sc, WMREG_ICR);
4337
4338 /* reload sc_ctrl */
4339 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4340
4341 if (sc->sc_type == WM_T_I350)
4342 wm_set_eee_i350(sc);
4343
4344 /* dummy read from WUC */
4345 if (sc->sc_type == WM_T_PCH)
4346 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4347 /*
4348 * For PCH, this write will make sure that any noise will be detected
4349 * as a CRC error and be dropped rather than show up as a bad packet
4350 * to the DMA engine.
4351 */
4352 if (sc->sc_type == WM_T_PCH)
4353 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4354
4355 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4356 CSR_WRITE(sc, WMREG_WUC, 0);
4357
4358 /* XXX need special handling for 82580 */
4359 }
4360
4361 static void
4362 wm_set_vlan(struct wm_softc *sc)
4363 {
4364 /* Deal with VLAN enables. */
4365 if (VLAN_ATTACHED(&sc->sc_ethercom))
4366 sc->sc_ctrl |= CTRL_VME;
4367 else
4368 sc->sc_ctrl &= ~CTRL_VME;
4369
4370 /* Write the control registers. */
4371 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4372 }
4373
4374 /*
4375 * wm_init: [ifnet interface function]
4376 *
4377 * Initialize the interface. Must be called at splnet().
4378 */
4379 static int
4380 wm_init(struct ifnet *ifp)
4381 {
4382 struct wm_softc *sc = ifp->if_softc;
4383 struct wm_rxsoft *rxs;
4384 int i, j, trynum, error = 0;
4385 uint32_t reg;
4386
4387 /*
4388 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4389 There is a small but measurable benefit to avoiding the adjustment
4390 of the descriptor so that the headers are aligned, for normal mtu,
4391 on such platforms. One possibility is that the DMA itself is
4392 slightly more efficient if the front of the entire packet (instead
4393 of the front of the headers) is aligned.
4394
4395 * Note we must always set align_tweak to 0 if we are using
4396 * jumbo frames.
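*
* Concretely: the Ethernet header is 14 bytes, so starting the
* frame two bytes into a 4-byte-aligned cluster lands the IP
* header on a longword boundary (2 + 14 = 16). A minimal sketch
* of the idea, assuming WM_INIT_RXDESC places m_data at ext_buf
* plus the tweak (illustrative only, not compiled in):
*/
#if 0
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak; /* 0 or 2 */
	/* tweak (2) + Ethernet header (14) = 16: IP header aligned */
#endif
/*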
4397 */ 4398 #ifdef __NO_STRICT_ALIGNMENT 4399 sc->sc_align_tweak = 0; 4400 #else 4401 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 4402 sc->sc_align_tweak = 0; 4403 else 4404 sc->sc_align_tweak = 2; 4405 #endif /* __NO_STRICT_ALIGNMENT */ 4406 4407 /* Cancel any pending I/O. */ 4408 wm_stop(ifp, 0); 4409 4410 /* update statistics before reset */ 4411 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 4412 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 4413 4414 /* Reset the chip to a known state. */ 4415 wm_reset(sc); 4416 4417 switch (sc->sc_type) { 4418 case WM_T_82571: 4419 case WM_T_82572: 4420 case WM_T_82573: 4421 case WM_T_82574: 4422 case WM_T_82583: 4423 case WM_T_80003: 4424 case WM_T_ICH8: 4425 case WM_T_ICH9: 4426 case WM_T_ICH10: 4427 case WM_T_PCH: 4428 case WM_T_PCH2: 4429 case WM_T_PCH_LPT: 4430 if (wm_check_mng_mode(sc) != 0) 4431 wm_get_hw_control(sc); 4432 break; 4433 default: 4434 break; 4435 } 4436 4437 /* Reset the PHY. */ 4438 if (sc->sc_flags & WM_F_HAS_MII) 4439 wm_gmii_reset(sc); 4440 4441 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4442 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 4443 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 4444 || (sc->sc_type == WM_T_PCH_LPT)) 4445 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN); 4446 4447 /* Initialize the transmit descriptor ring. */ 4448 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 4449 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 4450 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 4451 sc->sc_txfree = WM_NTXDESC(sc); 4452 sc->sc_txnext = 0; 4453 4454 if (sc->sc_type < WM_T_82543) { 4455 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4456 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4457 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 4458 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 4459 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 4460 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 4461 } else { 4462 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4463 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4464 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 4465 CSR_WRITE(sc, WMREG_TDH, 0); 4466 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ 4467 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ 4468 4469 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4470 /* 4471 * Don't write TDT before TCTL.EN is set. 4472 * See the document. 4473 */ 4474 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE 4475 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 4476 | TXDCTL_WTHRESH(0)); 4477 else { 4478 CSR_WRITE(sc, WMREG_TDT, 0); 4479 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 4480 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 4481 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 4482 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 4483 } 4484 } 4485 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 4486 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 4487 4488 /* Initialize the transmit job descriptors. */ 4489 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 4490 sc->sc_txsoft[i].txs_mbuf = NULL; 4491 sc->sc_txsfree = WM_TXQUEUELEN(sc); 4492 sc->sc_txsnext = 0; 4493 sc->sc_txsdirty = 0; 4494 4495 /* 4496 * Initialize the receive descriptor and receive job 4497 * descriptor rings. 
4498 */ 4499 if (sc->sc_type < WM_T_82543) { 4500 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 4501 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 4502 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 4503 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 4504 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 4505 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 4506 4507 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 4508 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 4509 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 4510 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 4511 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 4512 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 4513 } else { 4514 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 4515 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 4516 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 4517 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4518 CSR_WRITE(sc, WMREG_EITR(0), 450); 4519 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 4520 panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES); 4521 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY 4522 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 4523 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE 4524 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 4525 | RXDCTL_WTHRESH(1)); 4526 } else { 4527 CSR_WRITE(sc, WMREG_RDH, 0); 4528 CSR_WRITE(sc, WMREG_RDT, 0); 4529 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ 4530 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ 4531 } 4532 } 4533 for (i = 0; i < WM_NRXDESC; i++) { 4534 rxs = &sc->sc_rxsoft[i]; 4535 if (rxs->rxs_mbuf == NULL) { 4536 if ((error = wm_add_rxbuf(sc, i)) != 0) { 4537 log(LOG_ERR, "%s: unable to allocate or map " 4538 "rx buffer %d, error = %d\n", 4539 device_xname(sc->sc_dev), i, error); 4540 /* 4541 * XXX Should attempt to run with fewer receive 4542 * XXX buffers instead of just failing. 4543 */ 4544 wm_rxdrain(sc); 4545 goto out; 4546 } 4547 } else { 4548 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 4549 WM_INIT_RXDESC(sc, i); 4550 /* 4551 * For 82575 and newer device, the RX descriptors 4552 * must be initialized after the setting of RCTL.EN in 4553 * wm_set_filter() 4554 */ 4555 } 4556 } 4557 sc->sc_rxptr = 0; 4558 sc->sc_rxdiscard = 0; 4559 WM_RXCHAIN_RESET(sc); 4560 4561 /* 4562 * Clear out the VLAN table -- we don't use it (yet). 4563 */ 4564 CSR_WRITE(sc, WMREG_VET, 0); 4565 if (sc->sc_type == WM_T_I350) 4566 trynum = 10; /* Due to hw errata */ 4567 else 4568 trynum = 1; 4569 for (i = 0; i < WM_VLAN_TABSIZE; i++) 4570 for (j = 0; j < trynum; j++) 4571 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 4572 4573 /* 4574 * Set up flow-control parameters. 4575 * 4576 * XXX Values could probably stand some tuning. 4577 */ 4578 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 4579 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) 4580 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) { 4581 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 4582 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 4583 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 4584 } 4585 4586 sc->sc_fcrtl = FCRTL_DFLT; 4587 if (sc->sc_type < WM_T_82543) { 4588 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 4589 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 4590 } else { 4591 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 4592 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 4593 } 4594 4595 if (sc->sc_type == WM_T_80003) 4596 CSR_WRITE(sc, WMREG_FCTTV, 0xffff); 4597 else 4598 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 4599 4600 /* Writes the control register. 
*/ 4601 wm_set_vlan(sc); 4602 4603 if (sc->sc_flags & WM_F_HAS_MII) { 4604 int val; 4605 4606 switch (sc->sc_type) { 4607 case WM_T_80003: 4608 case WM_T_ICH8: 4609 case WM_T_ICH9: 4610 case WM_T_ICH10: 4611 case WM_T_PCH: 4612 case WM_T_PCH2: 4613 case WM_T_PCH_LPT: 4614 /* 4615 * Set the mac to wait the maximum time between each 4616 * iteration and increase the max iterations when 4617 * polling the phy; this fixes erroneous timeouts at 4618 * 10Mbps. 4619 */ 4620 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 4621 0xFFFF); 4622 val = wm_kmrn_readreg(sc, 4623 KUMCTRLSTA_OFFSET_INB_PARAM); 4624 val |= 0x3F; 4625 wm_kmrn_writereg(sc, 4626 KUMCTRLSTA_OFFSET_INB_PARAM, val); 4627 break; 4628 default: 4629 break; 4630 } 4631 4632 if (sc->sc_type == WM_T_80003) { 4633 val = CSR_READ(sc, WMREG_CTRL_EXT); 4634 val &= ~CTRL_EXT_LINK_MODE_MASK; 4635 CSR_WRITE(sc, WMREG_CTRL_EXT, val); 4636 4637 /* Bypass RX and TX FIFO's */ 4638 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 4639 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 4640 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 4641 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 4642 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 4643 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 4644 } 4645 } 4646 #if 0 4647 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 4648 #endif 4649 4650 /* 4651 * Set up checksum offload parameters. 4652 */ 4653 reg = CSR_READ(sc, WMREG_RXCSUM); 4654 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 4655 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 4656 reg |= RXCSUM_IPOFL; 4657 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 4658 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 4659 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 4660 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 4661 CSR_WRITE(sc, WMREG_RXCSUM, reg); 4662 4663 /* Reset TBI's RXCFG count */ 4664 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0; 4665 4666 /* 4667 * Set up the interrupt registers. 4668 */ 4669 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4670 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 4671 ICR_RXO | ICR_RXT0; 4672 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 4673 sc->sc_icr |= ICR_RXCFG; 4674 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 4675 4676 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4677 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 4678 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 4679 reg = CSR_READ(sc, WMREG_KABGTXD); 4680 reg |= KABGTXD_BGSQLBIAS; 4681 CSR_WRITE(sc, WMREG_KABGTXD, reg); 4682 } 4683 4684 /* Set up the inter-packet gap. */ 4685 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 4686 4687 if (sc->sc_type >= WM_T_82543) { 4688 /* 4689 * Set up the interrupt throttling register (units of 256ns) 4690 * Note that a footnote in Intel's documentation says this 4691 * ticker runs at 1/4 the rate when the chip is in 100Mbit 4692 * or 10Mbit mode. Empirically, it appears to be the case 4693 * that that is also true for the 1024ns units of the other 4694 * interrupt-related timer registers -- so, really, we ought 4695 * to divide this value by 4 when the link speed is low. 4696 * 4697 * XXX implement this division at link speed change! 4698 */ 4699 4700 /* 4701 * For N interrupts/sec, set this value to: 4702 * 1000000000 / (N * 256). Note that we set the 4703 * absolute and packet timer values to this value 4704 * divided by 4 to get "simple timer" behavior. 4705 */ 4706 4707 sc->sc_itr = 1500; /* 2604 ints/sec */ 4708 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); 4709 } 4710 4711 /* Set the VLAN ethernetype. 
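The MAC compares incoming
	 * ethertypes against VET when recognizing 802.1Q tags;
	 * ETHERTYPE_VLAN is the standard value 0x8100.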
*/
4712 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4713
4714 /*
4715 * Set up the transmit control register; we start out with
4716 * a collision distance suitable for FDX, but update it when
4717 * we resolve the media type.
4718 */
4719 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4720 | TCTL_CT(TX_COLLISION_THRESHOLD)
4721 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4722 if (sc->sc_type >= WM_T_82571)
4723 sc->sc_tctl |= TCTL_MULR;
4724 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4725
4726 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4727 /*
4728 * Write TDT after TCTL.EN is set.
4729 * See the document.
4730 */
4731 CSR_WRITE(sc, WMREG_TDT, 0);
4732 }
4733
4734 if (sc->sc_type == WM_T_80003) {
4735 reg = CSR_READ(sc, WMREG_TCTL_EXT);
4736 reg &= ~TCTL_EXT_GCEX_MASK;
4737 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4738 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4739 }
4740
4741 /* Set the media. */
4742 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4743 goto out;
4744
4745 /* Configure for OS presence */
4746 wm_init_manageability(sc);
4747
4748 /*
4749 * Set up the receive control register; we actually program
4750 * the register when we set the receive filter. Use multicast
4751 * address offset type 0.
4752 *
4753 * Only the i82544 has the ability to strip the incoming
4754 * CRC, so we don't enable that feature.
4755 */
4756 sc->sc_mchash_type = 0;
4757 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4758 | RCTL_MO(sc->sc_mchash_type);
4759
4760 /*
4761 * The I350 has a bug where it always strips the CRC whether
4762 * asked to or not. So ask for stripped CRC here and cope with it in wm_rxintr().
4763 */
4764 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
4765 sc->sc_rctl |= RCTL_SECRC;
4766
4767 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4768 && (ifp->if_mtu > ETHERMTU)) {
4769 sc->sc_rctl |= RCTL_LPE;
4770 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4771 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4772 }
4773
4774 if (MCLBYTES == 2048) {
4775 sc->sc_rctl |= RCTL_2k;
4776 } else {
4777 if (sc->sc_type >= WM_T_82543) {
4778 switch (MCLBYTES) {
4779 case 4096:
4780 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4781 break;
4782 case 8192:
4783 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4784 break;
4785 case 16384:
4786 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4787 break;
4788 default:
4789 panic("wm_init: MCLBYTES %d unsupported",
4790 MCLBYTES);
4791 break;
4792 }
4793 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4794 }
4795
4796 /* Set the receive filter. */
4797 wm_set_filter(sc);
4798
4799 /* Enable ECC */
4800 switch (sc->sc_type) {
4801 case WM_T_82571:
4802 reg = CSR_READ(sc, WMREG_PBA_ECC);
4803 reg |= PBA_ECC_CORR_EN;
4804 CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4805 break;
4806 case WM_T_PCH_LPT:
4807 reg = CSR_READ(sc, WMREG_PBECCSTS);
4808 reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4809 CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4810
4811 reg = CSR_READ(sc, WMREG_CTRL);
4812 reg |= CTRL_MEHE;
4813 CSR_WRITE(sc, WMREG_CTRL, reg);
4814 break;
4815 default:
4816 break;
4817 }
4818
4819 /* On 82575 and later, set RDT only if RX is enabled */
4820 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4821 for (i = 0; i < WM_NRXDESC; i++)
4822 WM_INIT_RXDESC(sc, i);
4823
4824 /* Start the one second link check clock. */
4825 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4826
4827 /* ...all done!
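Mark the interface running
	 * and clear IFF_OACTIVE so wm_start() will accept packets again.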
*/ 4828 ifp->if_flags |= IFF_RUNNING; 4829 ifp->if_flags &= ~IFF_OACTIVE; 4830 4831 out: 4832 sc->sc_if_flags = ifp->if_flags; 4833 if (error) 4834 log(LOG_ERR, "%s: interface not running\n", 4835 device_xname(sc->sc_dev)); 4836 return error; 4837 } 4838 4839 /* 4840 * wm_rxdrain: 4841 * 4842 * Drain the receive queue. 4843 */ 4844 static void 4845 wm_rxdrain(struct wm_softc *sc) 4846 { 4847 struct wm_rxsoft *rxs; 4848 int i; 4849 4850 for (i = 0; i < WM_NRXDESC; i++) { 4851 rxs = &sc->sc_rxsoft[i]; 4852 if (rxs->rxs_mbuf != NULL) { 4853 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 4854 m_freem(rxs->rxs_mbuf); 4855 rxs->rxs_mbuf = NULL; 4856 } 4857 } 4858 } 4859 4860 /* 4861 * wm_stop: [ifnet interface function] 4862 * 4863 * Stop transmission on the interface. 4864 */ 4865 static void 4866 wm_stop(struct ifnet *ifp, int disable) 4867 { 4868 struct wm_softc *sc = ifp->if_softc; 4869 struct wm_txsoft *txs; 4870 int i; 4871 4872 /* Stop the one second clock. */ 4873 callout_stop(&sc->sc_tick_ch); 4874 4875 /* Stop the 82547 Tx FIFO stall check timer. */ 4876 if (sc->sc_type == WM_T_82547) 4877 callout_stop(&sc->sc_txfifo_ch); 4878 4879 if (sc->sc_flags & WM_F_HAS_MII) { 4880 /* Down the MII. */ 4881 mii_down(&sc->sc_mii); 4882 } else { 4883 #if 0 4884 /* Should we clear PHY's status properly? */ 4885 wm_reset(sc); 4886 #endif 4887 } 4888 4889 /* Stop the transmit and receive processes. */ 4890 CSR_WRITE(sc, WMREG_TCTL, 0); 4891 CSR_WRITE(sc, WMREG_RCTL, 0); 4892 sc->sc_rctl &= ~RCTL_EN; 4893 4894 /* 4895 * Clear the interrupt mask to ensure the device cannot assert its 4896 * interrupt line. 4897 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service 4898 * any currently pending or shared interrupt. 4899 */ 4900 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4901 sc->sc_icr = 0; 4902 4903 /* Release any queued transmit buffers. */ 4904 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 4905 txs = &sc->sc_txsoft[i]; 4906 if (txs->txs_mbuf != NULL) { 4907 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 4908 m_freem(txs->txs_mbuf); 4909 txs->txs_mbuf = NULL; 4910 } 4911 } 4912 4913 /* Mark the interface as down and cancel the watchdog timer. 
*/ 4914 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4915 ifp->if_timer = 0; 4916 4917 if (disable) 4918 wm_rxdrain(sc); 4919 4920 #if 0 /* notyet */ 4921 if (sc->sc_type >= WM_T_82544) 4922 CSR_WRITE(sc, WMREG_WUC, 0); 4923 #endif 4924 } 4925 4926 void 4927 wm_get_auto_rd_done(struct wm_softc *sc) 4928 { 4929 int i; 4930 4931 /* wait for eeprom to reload */ 4932 switch (sc->sc_type) { 4933 case WM_T_82571: 4934 case WM_T_82572: 4935 case WM_T_82573: 4936 case WM_T_82574: 4937 case WM_T_82583: 4938 case WM_T_82575: 4939 case WM_T_82576: 4940 case WM_T_82580: 4941 case WM_T_82580ER: 4942 case WM_T_I350: 4943 case WM_T_I210: 4944 case WM_T_I211: 4945 case WM_T_80003: 4946 case WM_T_ICH8: 4947 case WM_T_ICH9: 4948 for (i = 0; i < 10; i++) { 4949 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 4950 break; 4951 delay(1000); 4952 } 4953 if (i == 10) { 4954 log(LOG_ERR, "%s: auto read from eeprom failed to " 4955 "complete\n", device_xname(sc->sc_dev)); 4956 } 4957 break; 4958 default: 4959 break; 4960 } 4961 } 4962 4963 void 4964 wm_lan_init_done(struct wm_softc *sc) 4965 { 4966 uint32_t reg = 0; 4967 int i; 4968 4969 /* wait for eeprom to reload */ 4970 switch (sc->sc_type) { 4971 case WM_T_ICH10: 4972 case WM_T_PCH: 4973 case WM_T_PCH2: 4974 case WM_T_PCH_LPT: 4975 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { 4976 reg = CSR_READ(sc, WMREG_STATUS); 4977 if ((reg & STATUS_LAN_INIT_DONE) != 0) 4978 break; 4979 delay(100); 4980 } 4981 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { 4982 log(LOG_ERR, "%s: %s: lan_init_done failed to " 4983 "complete\n", device_xname(sc->sc_dev), __func__); 4984 } 4985 break; 4986 default: 4987 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 4988 __func__); 4989 break; 4990 } 4991 4992 reg &= ~STATUS_LAN_INIT_DONE; 4993 CSR_WRITE(sc, WMREG_STATUS, reg); 4994 } 4995 4996 void 4997 wm_get_cfg_done(struct wm_softc *sc) 4998 { 4999 int mask; 5000 uint32_t reg; 5001 int i; 5002 5003 /* wait for eeprom to reload */ 5004 switch (sc->sc_type) { 5005 case WM_T_82542_2_0: 5006 case WM_T_82542_2_1: 5007 /* null */ 5008 break; 5009 case WM_T_82543: 5010 case WM_T_82544: 5011 case WM_T_82540: 5012 case WM_T_82545: 5013 case WM_T_82545_3: 5014 case WM_T_82546: 5015 case WM_T_82546_3: 5016 case WM_T_82541: 5017 case WM_T_82541_2: 5018 case WM_T_82547: 5019 case WM_T_82547_2: 5020 case WM_T_82573: 5021 case WM_T_82574: 5022 case WM_T_82583: 5023 /* generic */ 5024 delay(10*1000); 5025 break; 5026 case WM_T_80003: 5027 case WM_T_82571: 5028 case WM_T_82572: 5029 case WM_T_82575: 5030 case WM_T_82576: 5031 case WM_T_82580: 5032 case WM_T_82580ER: 5033 case WM_T_I350: 5034 case WM_T_I210: 5035 case WM_T_I211: 5036 if (sc->sc_type == WM_T_82571) { 5037 /* Only 82571 shares port 0 */ 5038 mask = EEMNGCTL_CFGDONE_0; 5039 } else 5040 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid; 5041 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) { 5042 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask) 5043 break; 5044 delay(1000); 5045 } 5046 if (i >= WM_PHY_CFG_TIMEOUT) { 5047 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n", 5048 device_xname(sc->sc_dev), __func__)); 5049 } 5050 break; 5051 case WM_T_ICH8: 5052 case WM_T_ICH9: 5053 case WM_T_ICH10: 5054 case WM_T_PCH: 5055 case WM_T_PCH2: 5056 case WM_T_PCH_LPT: 5057 delay(10*1000); 5058 if (sc->sc_type >= WM_T_ICH10) 5059 wm_lan_init_done(sc); 5060 else 5061 wm_get_auto_rd_done(sc); 5062 5063 reg = CSR_READ(sc, WMREG_STATUS); 5064 if ((reg & STATUS_PHYRA) != 0) 5065 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA); 5066 break; 5067 default: 5068 panic("%s: %s: unknown 
type\n", device_xname(sc->sc_dev), 5069 __func__); 5070 break; 5071 } 5072 } 5073 5074 /* 5075 * wm_acquire_eeprom: 5076 * 5077 * Perform the EEPROM handshake required on some chips. 5078 */ 5079 static int 5080 wm_acquire_eeprom(struct wm_softc *sc) 5081 { 5082 uint32_t reg; 5083 int x; 5084 int ret = 0; 5085 5086 /* always success */ 5087 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 5088 return 0; 5089 5090 if (sc->sc_flags & WM_F_SWFWHW_SYNC) { 5091 ret = wm_get_swfwhw_semaphore(sc); 5092 } else if (sc->sc_flags & WM_F_SWFW_SYNC) { 5093 /* this will also do wm_get_swsm_semaphore() if needed */ 5094 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 5095 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 5096 ret = wm_get_swsm_semaphore(sc); 5097 } 5098 5099 if (ret) { 5100 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 5101 __func__); 5102 return 1; 5103 } 5104 5105 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 5106 reg = CSR_READ(sc, WMREG_EECD); 5107 5108 /* Request EEPROM access. */ 5109 reg |= EECD_EE_REQ; 5110 CSR_WRITE(sc, WMREG_EECD, reg); 5111 5112 /* ..and wait for it to be granted. */ 5113 for (x = 0; x < 1000; x++) { 5114 reg = CSR_READ(sc, WMREG_EECD); 5115 if (reg & EECD_EE_GNT) 5116 break; 5117 delay(5); 5118 } 5119 if ((reg & EECD_EE_GNT) == 0) { 5120 aprint_error_dev(sc->sc_dev, 5121 "could not acquire EEPROM GNT\n"); 5122 reg &= ~EECD_EE_REQ; 5123 CSR_WRITE(sc, WMREG_EECD, reg); 5124 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 5125 wm_put_swfwhw_semaphore(sc); 5126 if (sc->sc_flags & WM_F_SWFW_SYNC) 5127 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 5128 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5129 wm_put_swsm_semaphore(sc); 5130 return 1; 5131 } 5132 } 5133 5134 return 0; 5135 } 5136 5137 /* 5138 * wm_release_eeprom: 5139 * 5140 * Release the EEPROM mutex. 5141 */ 5142 static void 5143 wm_release_eeprom(struct wm_softc *sc) 5144 { 5145 uint32_t reg; 5146 5147 /* always success */ 5148 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 5149 return; 5150 5151 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 5152 reg = CSR_READ(sc, WMREG_EECD); 5153 reg &= ~EECD_EE_REQ; 5154 CSR_WRITE(sc, WMREG_EECD, reg); 5155 } 5156 5157 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 5158 wm_put_swfwhw_semaphore(sc); 5159 if (sc->sc_flags & WM_F_SWFW_SYNC) 5160 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 5161 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5162 wm_put_swsm_semaphore(sc); 5163 } 5164 5165 /* 5166 * wm_eeprom_sendbits: 5167 * 5168 * Send a series of bits to the EEPROM. 5169 */ 5170 static void 5171 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 5172 { 5173 uint32_t reg; 5174 int x; 5175 5176 reg = CSR_READ(sc, WMREG_EECD); 5177 5178 for (x = nbits; x > 0; x--) { 5179 if (bits & (1U << (x - 1))) 5180 reg |= EECD_DI; 5181 else 5182 reg &= ~EECD_DI; 5183 CSR_WRITE(sc, WMREG_EECD, reg); 5184 delay(2); 5185 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 5186 delay(2); 5187 CSR_WRITE(sc, WMREG_EECD, reg); 5188 delay(2); 5189 } 5190 } 5191 5192 /* 5193 * wm_eeprom_recvbits: 5194 * 5195 * Receive a series of bits from the EEPROM. 
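Bits arrive MSB-first:
 * each iteration raises EECD_SK, waits ~2us, samples EECD_DO
 * while the clock is high, then drops SK again -- the mirror
 * image of wm_eeprom_sendbits() driving EECD_DI above.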
5196 */
5197 static void
5198 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5199 {
5200 uint32_t reg, val;
5201 int x;
5202
5203 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5204
5205 val = 0;
5206 for (x = nbits; x > 0; x--) {
5207 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5208 delay(2);
5209 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5210 val |= (1U << (x - 1));
5211 CSR_WRITE(sc, WMREG_EECD, reg);
5212 delay(2);
5213 }
5214 *valp = val;
5215 }
5216
5217 /*
5218 * wm_read_eeprom_uwire:
5219 *
5220 * Read a word from the EEPROM using the MicroWire protocol.
5221 */
5222 static int
5223 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5224 {
5225 uint32_t reg, val;
5226 int i;
5227
5228 for (i = 0; i < wordcnt; i++) {
5229 /* Clear SK and DI. */
5230 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5231 CSR_WRITE(sc, WMREG_EECD, reg);
5232
5233 /*
5234 * XXX: workaround for a bug in qemu-0.12.x and prior
5235 * and Xen.
5236 *
5237 * We use this workaround only for 82540 because qemu's
5238 * e1000 acts as an 82540.
5239 */
5240 if (sc->sc_type == WM_T_82540) {
5241 reg |= EECD_SK;
5242 CSR_WRITE(sc, WMREG_EECD, reg);
5243 reg &= ~EECD_SK;
5244 CSR_WRITE(sc, WMREG_EECD, reg);
5245 delay(2);
5246 }
5247 /* XXX: end of workaround */
5248
5249 /* Set CHIP SELECT. */
5250 reg |= EECD_CS;
5251 CSR_WRITE(sc, WMREG_EECD, reg);
5252 delay(2);
5253
5254 /* Shift in the READ command. */
5255 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5256
5257 /* Shift in address. */
5258 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5259
5260 /* Shift out the data. */
5261 wm_eeprom_recvbits(sc, &val, 16);
5262 data[i] = val & 0xffff;
5263
5264 /* Clear CHIP SELECT. */
5265 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5266 CSR_WRITE(sc, WMREG_EECD, reg);
5267 delay(2);
5268 }
5269
5270 return 0;
5271 }
5272
5273 /*
5274 * wm_spi_eeprom_ready:
5275 *
5276 * Wait for a SPI EEPROM to be ready for commands.
5277 */
5278 static int
5279 wm_spi_eeprom_ready(struct wm_softc *sc)
5280 {
5281 uint32_t val;
5282 int usec;
5283
5284 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5285 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5286 wm_eeprom_recvbits(sc, &val, 8);
5287 if ((val & SPI_SR_RDY) == 0)
5288 break;
5289 }
5290 if (usec >= SPI_MAX_RETRIES) {
5291 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5292 return 1;
5293 }
5294 return 0;
5295 }
5296
5297 /*
5298 * wm_read_eeprom_spi:
5299 *
5300 * Read a word from the EEPROM using the SPI protocol.
5301 */
5302 static int
5303 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5304 {
5305 uint32_t reg, val;
5306 int i;
5307 uint8_t opc;
5308
5309 /* Clear SK and CS. */
5310 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5311 CSR_WRITE(sc, WMREG_EECD, reg);
5312 delay(2);
5313
5314 if (wm_spi_eeprom_ready(sc))
5315 return 1;
5316
5317 /* Toggle CS to flush commands. */
5318 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5319 delay(2);
5320 CSR_WRITE(sc, WMREG_EECD, reg);
5321 delay(2);
5322
5323 opc = SPI_OPC_READ;
5324 if (sc->sc_ee_addrbits == 8 && word >= 128)
5325 opc |= SPI_OPC_A8;
5326
5327 wm_eeprom_sendbits(sc, opc, 8);
5328 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5329
5330 for (i = 0; i < wordcnt; i++) {
5331 wm_eeprom_recvbits(sc, &val, 16);
5332 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5333 }
5334
5335 /* Raise CS and clear SK.
*/ 5336 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 5337 CSR_WRITE(sc, WMREG_EECD, reg); 5338 delay(2); 5339 5340 return 0; 5341 } 5342 5343 #define NVM_CHECKSUM 0xBABA 5344 #define EEPROM_SIZE 0x0040 5345 #define NVM_COMPAT 0x0003 5346 #define NVM_COMPAT_VALID_CHECKSUM 0x0001 5347 #define NVM_FUTURE_INIT_WORD1 0x0019 5348 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040 5349 5350 /* 5351 * wm_validate_eeprom_checksum 5352 * 5353 * The checksum is defined as the sum of the first 64 (16 bit) words. 5354 */ 5355 static int 5356 wm_validate_eeprom_checksum(struct wm_softc *sc) 5357 { 5358 uint16_t checksum; 5359 uint16_t eeprom_data; 5360 #ifdef WM_DEBUG 5361 uint16_t csum_wordaddr, valid_checksum; 5362 #endif 5363 int i; 5364 5365 checksum = 0; 5366 5367 /* Don't check for I211 */ 5368 if (sc->sc_type == WM_T_I211) 5369 return 0; 5370 5371 #ifdef WM_DEBUG 5372 if (sc->sc_type == WM_T_PCH_LPT) { 5373 csum_wordaddr = NVM_COMPAT; 5374 valid_checksum = NVM_COMPAT_VALID_CHECKSUM; 5375 } else { 5376 csum_wordaddr = NVM_FUTURE_INIT_WORD1; 5377 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM; 5378 } 5379 5380 /* Dump EEPROM image for debug */ 5381 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5382 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5383 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 5384 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data); 5385 if ((eeprom_data & valid_checksum) == 0) { 5386 DPRINTF(WM_DEBUG_NVM, 5387 ("%s: NVM need to be updated (%04x != %04x)\n", 5388 device_xname(sc->sc_dev), eeprom_data, 5389 valid_checksum)); 5390 } 5391 } 5392 5393 if ((wm_debug & WM_DEBUG_NVM) != 0) { 5394 printf("%s: NVM dump:\n", device_xname(sc->sc_dev)); 5395 for (i = 0; i < EEPROM_SIZE; i++) { 5396 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 5397 printf("XX "); 5398 else 5399 printf("%04x ", eeprom_data); 5400 if (i % 8 == 7) 5401 printf("\n"); 5402 } 5403 } 5404 5405 #endif /* WM_DEBUG */ 5406 5407 for (i = 0; i < EEPROM_SIZE; i++) { 5408 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 5409 return 1; 5410 checksum += eeprom_data; 5411 } 5412 5413 if (checksum != (uint16_t) NVM_CHECKSUM) { 5414 #ifdef WM_DEBUG 5415 printf("%s: NVM checksum mismatch (%04x != %04x)\n", 5416 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM); 5417 #endif 5418 } 5419 5420 return 0; 5421 } 5422 5423 /* 5424 * wm_read_eeprom: 5425 * 5426 * Read data from the serial EEPROM. 
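The access method is chosen
 * from the chip type and sc_flags: ICH/PCH parts go through the
 * flash interface, EERD-capable parts use the EERD register, and
 * the rest are bit-banged over EECD using SPI or MicroWire framing.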
5427 */ 5428 static int 5429 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5430 { 5431 int rv; 5432 5433 if (sc->sc_flags & WM_F_EEPROM_INVALID) 5434 return 1; 5435 5436 if (wm_acquire_eeprom(sc)) 5437 return 1; 5438 5439 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5440 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5441 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 5442 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data); 5443 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 5444 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data); 5445 else if (sc->sc_flags & WM_F_EEPROM_SPI) 5446 rv = wm_read_eeprom_spi(sc, word, wordcnt, data); 5447 else 5448 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data); 5449 5450 wm_release_eeprom(sc); 5451 return rv; 5452 } 5453 5454 static int 5455 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt, 5456 uint16_t *data) 5457 { 5458 int i, eerd = 0; 5459 int error = 0; 5460 5461 for (i = 0; i < wordcnt; i++) { 5462 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 5463 5464 CSR_WRITE(sc, WMREG_EERD, eerd); 5465 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 5466 if (error != 0) 5467 break; 5468 5469 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 5470 } 5471 5472 return error; 5473 } 5474 5475 static int 5476 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 5477 { 5478 uint32_t attempts = 100000; 5479 uint32_t i, reg = 0; 5480 int32_t done = -1; 5481 5482 for (i = 0; i < attempts; i++) { 5483 reg = CSR_READ(sc, rw); 5484 5485 if (reg & EERD_DONE) { 5486 done = 0; 5487 break; 5488 } 5489 delay(5); 5490 } 5491 5492 return done; 5493 } 5494 5495 static int 5496 wm_check_alt_mac_addr(struct wm_softc *sc) 5497 { 5498 uint16_t myea[ETHER_ADDR_LEN / 2]; 5499 uint16_t offset = EEPROM_OFF_MACADDR; 5500 5501 /* Try to read alternative MAC address pointer */ 5502 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0) 5503 return -1; 5504 5505 /* Check pointer */ 5506 if (offset == 0xffff) 5507 return -1; 5508 5509 /* 5510 * Check whether alternative MAC address is valid or not. 5511 * Some cards have non 0xffff pointer but those don't use 5512 * alternative MAC address in reality. 5513 * 5514 * Check whether the broadcast bit is set or not. 5515 */ 5516 if (wm_read_eeprom(sc, offset, 1, myea) == 0) 5517 if (((myea[0] & 0xff) & 0x01) == 0) 5518 return 0; /* found! 
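The word read back has the
		 * multicast bit (bit 0 of the first byte) clear, so a
		 * plausible station address is stored at the pointer.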
*/
5519
5520 /* not found */
5521 return -1;
5522 }
5523
5524 static int
5525 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5526 {
5527 uint16_t myea[ETHER_ADDR_LEN / 2];
5528 uint16_t offset = EEPROM_OFF_MACADDR;
5529 int do_invert = 0;
5530
5531 switch (sc->sc_type) {
5532 case WM_T_82580:
5533 case WM_T_82580ER:
5534 case WM_T_I350:
5535 switch (sc->sc_funcid) {
5536 case 0:
5537 /* default value (== EEPROM_OFF_MACADDR) */
5538 break;
5539 case 1:
5540 offset = EEPROM_OFF_LAN1;
5541 break;
5542 case 2:
5543 offset = EEPROM_OFF_LAN2;
5544 break;
5545 case 3:
5546 offset = EEPROM_OFF_LAN3;
5547 break;
5548 default:
5549 goto bad;
5550 /* NOTREACHED */
5551 break;
5552 }
5553 break;
5554 case WM_T_82571:
5555 case WM_T_82575:
5556 case WM_T_82576:
5557 case WM_T_80003:
5558 case WM_T_I210:
5559 case WM_T_I211:
5560 if (wm_check_alt_mac_addr(sc) != 0) {
5561 /* reset the offset to LAN0 */
5562 offset = EEPROM_OFF_MACADDR;
5563 if ((sc->sc_funcid & 0x01) == 1)
5564 do_invert = 1;
5565 goto do_read;
5566 }
5567 switch (sc->sc_funcid) {
5568 case 0:
5569 /*
5570 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5571 * itself.
5572 */
5573 break;
5574 case 1:
5575 offset += EEPROM_OFF_MACADDR_LAN1;
5576 break;
5577 case 2:
5578 offset += EEPROM_OFF_MACADDR_LAN2;
5579 break;
5580 case 3:
5581 offset += EEPROM_OFF_MACADDR_LAN3;
5582 break;
5583 default:
5584 goto bad;
5585 /* NOTREACHED */
5586 break;
5587 }
5588 break;
5589 default:
5590 if ((sc->sc_funcid & 0x01) == 1)
5591 do_invert = 1;
5592 break;
5593 }
5594
5595 do_read:
5596 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5597 myea) != 0) {
5598 goto bad;
5599 }
5600
5601 enaddr[0] = myea[0] & 0xff;
5602 enaddr[1] = myea[0] >> 8;
5603 enaddr[2] = myea[1] & 0xff;
5604 enaddr[3] = myea[1] >> 8;
5605 enaddr[4] = myea[2] & 0xff;
5606 enaddr[5] = myea[2] >> 8;
5607
5608 /*
5609 * Toggle the LSB of the MAC address on the second port
5610 * of some dual port cards.
5611 */
5612 if (do_invert != 0)
5613 enaddr[5] ^= 1;
5614
5615 return 0;
5616
5617 bad:
5618 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5619
5620 return -1;
5621 }
5622
5623 /*
5624 * wm_add_rxbuf:
5625 *
5626 * Add a receive buffer to the indicated descriptor.
5627 */
5628 static int
5629 wm_add_rxbuf(struct wm_softc *sc, int idx)
5630 {
5631 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5632 struct mbuf *m;
5633 int error;
5634
5635 MGETHDR(m, M_DONTWAIT, MT_DATA);
5636 if (m == NULL)
5637 return ENOBUFS;
5638
5639 MCLGET(m, M_DONTWAIT);
5640 if ((m->m_flags & M_EXT) == 0) {
5641 m_freem(m);
5642 return ENOBUFS;
5643 }
5644
5645 if (rxs->rxs_mbuf != NULL)
5646 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5647
5648 rxs->rxs_mbuf = m;
5649
5650 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5651 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5652 BUS_DMA_READ|BUS_DMA_NOWAIT);
5653 if (error) {
5654 /* XXX XXX XXX */
5655 aprint_error_dev(sc->sc_dev,
5656 "unable to load rx DMA map %d, error = %d\n",
5657 idx, error);
5658 panic("wm_add_rxbuf");
5659 }
5660
5661 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5662 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5663
5664 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5665 if ((sc->sc_rctl & RCTL_EN) != 0)
5666 WM_INIT_RXDESC(sc, idx);
5667 } else
5668 WM_INIT_RXDESC(sc, idx);
5669
5670 return 0;
5671 }
5672
5673 /*
5674 * wm_set_ral:
5675 *
5676 * Set an entry in the receive address list.
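*
* The 6-byte address is packed little-endian into a RAL_LO/RAL_HI
* register pair, with RAL_AV marking the slot valid. A worked
* example (hypothetical address, illustrative only, not compiled in):
*/
#if 0
	const uint8_t ea[ETHER_ADDR_LEN] =
	    { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t lo = ea[0] | (ea[1] << 8) | (ea[2] << 16) | (ea[3] << 24);
	uint32_t hi = (ea[4] | (ea[5] << 8)) | RAL_AV;
	/* lo == 0x33221100, hi == 0x00005544 | RAL_AV */
#endif
/*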
5677 */ 5678 static void 5679 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 5680 { 5681 uint32_t ral_lo, ral_hi; 5682 5683 if (enaddr != NULL) { 5684 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 5685 (enaddr[3] << 24); 5686 ral_hi = enaddr[4] | (enaddr[5] << 8); 5687 ral_hi |= RAL_AV; 5688 } else { 5689 ral_lo = 0; 5690 ral_hi = 0; 5691 } 5692 5693 if (sc->sc_type >= WM_T_82544) { 5694 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 5695 ral_lo); 5696 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 5697 ral_hi); 5698 } else { 5699 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 5700 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 5701 } 5702 } 5703 5704 /* 5705 * wm_mchash: 5706 * 5707 * Compute the hash of the multicast address for the 4096-bit 5708 * multicast filter. 5709 */ 5710 static uint32_t 5711 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 5712 { 5713 static const int lo_shift[4] = { 4, 3, 2, 0 }; 5714 static const int hi_shift[4] = { 4, 5, 6, 8 }; 5715 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 5716 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 5717 uint32_t hash; 5718 5719 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5720 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5721 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 5722 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 5723 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 5724 return (hash & 0x3ff); 5725 } 5726 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 5727 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 5728 5729 return (hash & 0xfff); 5730 } 5731 5732 /* 5733 * wm_set_filter: 5734 * 5735 * Set up the receive filter. 5736 */ 5737 static void 5738 wm_set_filter(struct wm_softc *sc) 5739 { 5740 struct ethercom *ec = &sc->sc_ethercom; 5741 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 5742 struct ether_multi *enm; 5743 struct ether_multistep step; 5744 bus_addr_t mta_reg; 5745 uint32_t hash, reg, bit; 5746 int i, size; 5747 5748 if (sc->sc_type >= WM_T_82544) 5749 mta_reg = WMREG_CORDOVA_MTA; 5750 else 5751 mta_reg = WMREG_MTA; 5752 5753 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 5754 5755 if (ifp->if_flags & IFF_BROADCAST) 5756 sc->sc_rctl |= RCTL_BAM; 5757 if (ifp->if_flags & IFF_PROMISC) { 5758 sc->sc_rctl |= RCTL_UPE; 5759 goto allmulti; 5760 } 5761 5762 /* 5763 * Set the station address in the first RAL slot, and 5764 * clear the remaining slots. 5765 */ 5766 if (sc->sc_type == WM_T_ICH8) 5767 size = WM_RAL_TABSIZE_ICH8 -1; 5768 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) 5769 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 5770 || (sc->sc_type == WM_T_PCH_LPT)) 5771 size = WM_RAL_TABSIZE_ICH8; 5772 else if (sc->sc_type == WM_T_82575) 5773 size = WM_RAL_TABSIZE_82575; 5774 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) 5775 size = WM_RAL_TABSIZE_82576; 5776 else if (sc->sc_type == WM_T_I350) 5777 size = WM_RAL_TABSIZE_I350; 5778 else 5779 size = WM_RAL_TABSIZE; 5780 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); 5781 for (i = 1; i < size; i++) 5782 wm_set_ral(sc, NULL, i); 5783 5784 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5785 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5786 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 5787 size = WM_ICH8_MC_TABSIZE; 5788 else 5789 size = WM_MC_TABSIZE; 5790 /* Clear out the multicast table. 
*/ 5791 for (i = 0; i < size; i++) 5792 CSR_WRITE(sc, mta_reg + (i << 2), 0); 5793 5794 ETHER_FIRST_MULTI(step, ec, enm); 5795 while (enm != NULL) { 5796 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 5797 /* 5798 * We must listen to a range of multicast addresses. 5799 * For now, just accept all multicasts, rather than 5800 * trying to set only those filter bits needed to match 5801 * the range. (At this time, the only use of address 5802 * ranges is for IP multicast routing, for which the 5803 * range is big enough to require all bits set.) 5804 */ 5805 goto allmulti; 5806 } 5807 5808 hash = wm_mchash(sc, enm->enm_addrlo); 5809 5810 reg = (hash >> 5); 5811 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5812 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5813 || (sc->sc_type == WM_T_PCH2) 5814 || (sc->sc_type == WM_T_PCH_LPT)) 5815 reg &= 0x1f; 5816 else 5817 reg &= 0x7f; 5818 bit = hash & 0x1f; 5819 5820 hash = CSR_READ(sc, mta_reg + (reg << 2)); 5821 hash |= 1U << bit; 5822 5823 /* XXX Hardware bug?? */ 5824 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { 5825 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 5826 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 5827 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 5828 } else 5829 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 5830 5831 ETHER_NEXT_MULTI(step, enm); 5832 } 5833 5834 ifp->if_flags &= ~IFF_ALLMULTI; 5835 goto setit; 5836 5837 allmulti: 5838 ifp->if_flags |= IFF_ALLMULTI; 5839 sc->sc_rctl |= RCTL_MPE; 5840 5841 setit: 5842 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 5843 } 5844 5845 /* 5846 * wm_tbi_mediainit: 5847 * 5848 * Initialize media for use on 1000BASE-X devices. 5849 */ 5850 static void 5851 wm_tbi_mediainit(struct wm_softc *sc) 5852 { 5853 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 5854 const char *sep = ""; 5855 5856 if (sc->sc_type < WM_T_82543) 5857 sc->sc_tipg = TIPG_WM_DFLT; 5858 else 5859 sc->sc_tipg = TIPG_LG_DFLT; 5860 5861 sc->sc_tbi_anegticks = 5; 5862 5863 /* Initialize our media structures */ 5864 sc->sc_mii.mii_ifp = ifp; 5865 5866 sc->sc_ethercom.ec_mii = &sc->sc_mii; 5867 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange, 5868 wm_tbi_mediastatus); 5869 5870 /* 5871 * SWD Pins: 5872 * 5873 * 0 = Link LED (output) 5874 * 1 = Loss Of Signal (input) 5875 */ 5876 sc->sc_ctrl |= CTRL_SWDPIO(0); 5877 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 5878 5879 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5880 5881 #define ADD(ss, mm, dd) \ 5882 do { \ 5883 aprint_normal("%s%s", sep, ss); \ 5884 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \ 5885 sep = ", "; \ 5886 } while (/*CONSTCOND*/0) 5887 5888 aprint_normal_dev(sc->sc_dev, ""); 5889 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 5890 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD); 5891 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD); 5892 aprint_normal("\n"); 5893 5894 #undef ADD 5895 5896 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 5897 } 5898 5899 /* 5900 * wm_tbi_mediastatus: [ifmedia interface function] 5901 * 5902 * Get the current interface media status on a 1000BASE-X device. 
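 *
 * Link and duplex are taken from the STATUS register; as a
 * flow-control hint, CTRL_RFCE/CTRL_TFCE are mapped to the
 * IFM_ETH_RXPAUSE/IFM_ETH_TXPAUSE media flags in the body below.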
5903 */ 5904 static void 5905 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 5906 { 5907 struct wm_softc *sc = ifp->if_softc; 5908 uint32_t ctrl, status; 5909 5910 ifmr->ifm_status = IFM_AVALID; 5911 ifmr->ifm_active = IFM_ETHER; 5912 5913 status = CSR_READ(sc, WMREG_STATUS); 5914 if ((status & STATUS_LU) == 0) { 5915 ifmr->ifm_active |= IFM_NONE; 5916 return; 5917 } 5918 5919 ifmr->ifm_status |= IFM_ACTIVE; 5920 ifmr->ifm_active |= IFM_1000_SX; 5921 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 5922 ifmr->ifm_active |= IFM_FDX; 5923 ctrl = CSR_READ(sc, WMREG_CTRL); 5924 if (ctrl & CTRL_RFCE) 5925 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 5926 if (ctrl & CTRL_TFCE) 5927 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 5928 } 5929 5930 /* 5931 * wm_tbi_mediachange: [ifmedia interface function] 5932 * 5933 * Set hardware to newly-selected media on a 1000BASE-X device. 5934 */ 5935 static int 5936 wm_tbi_mediachange(struct ifnet *ifp) 5937 { 5938 struct wm_softc *sc = ifp->if_softc; 5939 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 5940 uint32_t status; 5941 int i; 5942 5943 sc->sc_txcw = 0; 5944 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO || 5945 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 5946 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE; 5947 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 5948 sc->sc_txcw |= TXCW_ANE; 5949 } else { 5950 /* 5951 * If autonegotiation is turned off, force link up and turn on 5952 * full duplex 5953 */ 5954 sc->sc_txcw &= ~TXCW_ANE; 5955 sc->sc_ctrl |= CTRL_SLU | CTRL_FD; 5956 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 5957 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5958 delay(1000); 5959 } 5960 5961 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n", 5962 device_xname(sc->sc_dev),sc->sc_txcw)); 5963 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 5964 delay(10000); 5965 5966 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1); 5967 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i)); 5968 5969 /* 5970 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the 5971 * optics detect a signal, 0 if they don't. 5972 */ 5973 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) { 5974 /* Have signal; wait for the link to come up. */ 5975 5976 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 5977 /* 5978 * Reset the link, and let autonegotiation do its thing 5979 */ 5980 sc->sc_ctrl |= CTRL_LRST; 5981 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5982 delay(1000); 5983 sc->sc_ctrl &= ~CTRL_LRST; 5984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5985 delay(1000); 5986 } 5987 5988 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) { 5989 delay(10000); 5990 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 5991 break; 5992 } 5993 5994 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n", 5995 device_xname(sc->sc_dev),i)); 5996 5997 status = CSR_READ(sc, WMREG_STATUS); 5998 DPRINTF(WM_DEBUG_LINK, 5999 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n", 6000 device_xname(sc->sc_dev),status, STATUS_LU)); 6001 if (status & STATUS_LU) { 6002 /* Link is up. */ 6003 DPRINTF(WM_DEBUG_LINK, 6004 ("%s: LINK: set media -> link up %s\n", 6005 device_xname(sc->sc_dev), 6006 (status & STATUS_FD) ? 
"FDX" : "HDX")); 6007 6008 /* 6009 * NOTE: CTRL will update TFCE and RFCE automatically, 6010 * so we should update sc->sc_ctrl 6011 */ 6012 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 6013 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 6014 sc->sc_fcrtl &= ~FCRTL_XONE; 6015 if (status & STATUS_FD) 6016 sc->sc_tctl |= 6017 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 6018 else 6019 sc->sc_tctl |= 6020 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 6021 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 6022 sc->sc_fcrtl |= FCRTL_XONE; 6023 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 6024 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 6025 WMREG_OLD_FCRTL : WMREG_FCRTL, 6026 sc->sc_fcrtl); 6027 sc->sc_tbi_linkup = 1; 6028 } else { 6029 if (i == WM_LINKUP_TIMEOUT) 6030 wm_check_for_link(sc); 6031 /* Link is down. */ 6032 DPRINTF(WM_DEBUG_LINK, 6033 ("%s: LINK: set media -> link down\n", 6034 device_xname(sc->sc_dev))); 6035 sc->sc_tbi_linkup = 0; 6036 } 6037 } else { 6038 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 6039 device_xname(sc->sc_dev))); 6040 sc->sc_tbi_linkup = 0; 6041 } 6042 6043 wm_tbi_set_linkled(sc); 6044 6045 return 0; 6046 } 6047 6048 /* 6049 * wm_tbi_set_linkled: 6050 * 6051 * Update the link LED on 1000BASE-X devices. 6052 */ 6053 static void 6054 wm_tbi_set_linkled(struct wm_softc *sc) 6055 { 6056 6057 if (sc->sc_tbi_linkup) 6058 sc->sc_ctrl |= CTRL_SWDPIN(0); 6059 else 6060 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 6061 6062 /* 82540 or newer devices are active low */ 6063 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; 6064 6065 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6066 } 6067 6068 /* 6069 * wm_tbi_check_link: 6070 * 6071 * Check the link on 1000BASE-X devices. 6072 */ 6073 static void 6074 wm_tbi_check_link(struct wm_softc *sc) 6075 { 6076 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 6077 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 6078 uint32_t status; 6079 6080 status = CSR_READ(sc, WMREG_STATUS); 6081 6082 /* XXX is this needed? */ 6083 (void)CSR_READ(sc, WMREG_RXCW); 6084 (void)CSR_READ(sc, WMREG_CTRL); 6085 6086 /* set link status */ 6087 if ((status & STATUS_LU) == 0) { 6088 DPRINTF(WM_DEBUG_LINK, 6089 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev))); 6090 sc->sc_tbi_linkup = 0; 6091 } else if (sc->sc_tbi_linkup == 0) { 6092 DPRINTF(WM_DEBUG_LINK, 6093 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev), 6094 (status & STATUS_FD) ? "FDX" : "HDX")); 6095 sc->sc_tbi_linkup = 1; 6096 } 6097 6098 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) 6099 && ((status & STATUS_LU) == 0)) { 6100 sc->sc_tbi_linkup = 0; 6101 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) { 6102 /* RXCFG storm! */ 6103 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! 
(%d)\n",
6104 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6105 wm_init(ifp);
6106 ifp->if_start(ifp);
6107 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6108 /* If the timer expired, retry autonegotiation */
6109 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6110 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6111 sc->sc_tbi_ticks = 0;
6112 /*
6113 * Reset the link, and let autonegotiation do
6114 * its thing
6115 */
6116 sc->sc_ctrl |= CTRL_LRST;
6117 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6118 delay(1000);
6119 sc->sc_ctrl &= ~CTRL_LRST;
6120 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6121 delay(1000);
6122 CSR_WRITE(sc, WMREG_TXCW,
6123 sc->sc_txcw & ~TXCW_ANE);
6124 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6125 }
6126 }
6127 }
6128
6129 wm_tbi_set_linkled(sc);
6130 }
6131
6132 /*
6133 * wm_gmii_reset:
6134 *
6135 * Reset the PHY.
6136 */
6137 static void
6138 wm_gmii_reset(struct wm_softc *sc)
6139 {
6140 uint32_t reg;
6141 int rv;
6142
6143 /* get phy semaphore */
6144 switch (sc->sc_type) {
6145 case WM_T_82571:
6146 case WM_T_82572:
6147 case WM_T_82573:
6148 case WM_T_82574:
6149 case WM_T_82583:
6150 /* XXX should get sw semaphore, too */
6151 rv = wm_get_swsm_semaphore(sc);
6152 break;
6153 case WM_T_82575:
6154 case WM_T_82576:
6155 case WM_T_82580:
6156 case WM_T_82580ER:
6157 case WM_T_I350:
6158 case WM_T_I210:
6159 case WM_T_I211:
6160 case WM_T_80003:
6161 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6162 break;
6163 case WM_T_ICH8:
6164 case WM_T_ICH9:
6165 case WM_T_ICH10:
6166 case WM_T_PCH:
6167 case WM_T_PCH2:
6168 case WM_T_PCH_LPT:
6169 rv = wm_get_swfwhw_semaphore(sc);
6170 break;
6171 default:
6172 /* nothing to do */
6173 rv = 0;
6174 break;
6175 }
6176 if (rv != 0) {
6177 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6178 __func__);
6179 return;
6180 }
6181
6182 switch (sc->sc_type) {
6183 case WM_T_82542_2_0:
6184 case WM_T_82542_2_1:
6185 /* null */
6186 break;
6187 case WM_T_82543:
6188 /*
6189 * With 82543, we need to force speed and duplex on the MAC
6190 * equal to what the PHY speed and duplex configuration is.
6191 * In addition, we need to perform a hardware reset on the PHY
6192 * to take it out of reset.
6193 */
6194 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6195 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6196
6197 /* The PHY reset pin is active-low. */
6198 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6199 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6200 CTRL_EXT_SWDPIN(4));
6201 reg |= CTRL_EXT_SWDPIO(4);
6202
6203 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6204 delay(10*1000);
6205
6206 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6207 delay(150);
6208 #if 0
6209 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6210 #endif
6211 delay(20*1000); /* XXX extra delay to get PHY ID? */
6212 break;
6213 case WM_T_82544: /* reset 10000us */
6214 case WM_T_82540:
6215 case WM_T_82545:
6216 case WM_T_82545_3:
6217 case WM_T_82546:
6218 case WM_T_82546_3:
6219 case WM_T_82541:
6220 case WM_T_82541_2:
6221 case WM_T_82547:
6222 case WM_T_82547_2:
6223 case WM_T_82571: /* reset 100us */
6224 case WM_T_82572:
6225 case WM_T_82573:
6226 case WM_T_82574:
6227 case WM_T_82575:
6228 case WM_T_82576:
6229 case WM_T_82580:
6230 case WM_T_82580ER:
6231 case WM_T_I350:
6232 case WM_T_I210:
6233 case WM_T_I211:
6234 case WM_T_82583:
6235 case WM_T_80003:
6236 /* generic reset */
6237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6238 delay(20000);
6239 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6240 delay(20000);
6241
6242 if ((sc->sc_type == WM_T_82541)
6243 || (sc->sc_type == WM_T_82541_2)
6244 || (sc->sc_type == WM_T_82547)
6245 || (sc->sc_type == WM_T_82547_2)) {
6246 /* workarounds for igp are done in igp_reset() */
6247 /* XXX add code to set LED after phy reset */
6248 }
6249 break;
6250 case WM_T_ICH8:
6251 case WM_T_ICH9:
6252 case WM_T_ICH10:
6253 case WM_T_PCH:
6254 case WM_T_PCH2:
6255 case WM_T_PCH_LPT:
6256 /* generic reset */
6257 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6258 delay(100);
6259 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6260 delay(150);
6261 break;
6262 default:
6263 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6264 __func__);
6265 break;
6266 }
6267
6268 /* release PHY semaphore */
6269 switch (sc->sc_type) {
6270 case WM_T_82571:
6271 case WM_T_82572:
6272 case WM_T_82573:
6273 case WM_T_82574:
6274 case WM_T_82583:
6275 /* XXX should put sw semaphore, too */
6276 wm_put_swsm_semaphore(sc);
6277 break;
6278 case WM_T_82575:
6279 case WM_T_82576:
6280 case WM_T_82580:
6281 case WM_T_82580ER:
6282 case WM_T_I350:
6283 case WM_T_I210:
6284 case WM_T_I211:
6285 case WM_T_80003:
6286 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6287 break;
6288 case WM_T_ICH8:
6289 case WM_T_ICH9:
6290 case WM_T_ICH10:
6291 case WM_T_PCH:
6292 case WM_T_PCH2:
6293 case WM_T_PCH_LPT:
6294 wm_put_swfwhw_semaphore(sc);
6295 break;
6296 default:
6297 /* nothing to do */
6298 rv = 0;
6299 break;
6300 }
6301
6302 /* get_cfg_done */
6303 wm_get_cfg_done(sc);
6304
6305 /* extra setup */
6306 switch (sc->sc_type) {
6307 case WM_T_82542_2_0:
6308 case WM_T_82542_2_1:
6309 case WM_T_82543:
6310 case WM_T_82544:
6311 case WM_T_82540:
6312 case WM_T_82545:
6313 case WM_T_82545_3:
6314 case WM_T_82546:
6315 case WM_T_82546_3:
6316 case WM_T_82541_2:
6317 case WM_T_82547_2:
6318 case WM_T_82571:
6319 case WM_T_82572:
6320 case WM_T_82573:
6321 case WM_T_82574:
6322 case WM_T_82575:
6323 case WM_T_82576:
6324 case WM_T_82580:
6325 case WM_T_82580ER:
6326 case WM_T_I350:
6327 case WM_T_I210:
6328 case WM_T_I211:
6329 case WM_T_82583:
6330 case WM_T_80003:
6331 /* null */
6332 break;
6333 case WM_T_82541:
6334 case WM_T_82547:
6335 /* XXX Actively configure the LED after PHY reset */
6336 break;
6337 case WM_T_ICH8:
6338 case WM_T_ICH9:
6339 case WM_T_ICH10:
6340 case WM_T_PCH:
6341 case WM_T_PCH2:
6342 case WM_T_PCH_LPT:
6343 /* Allow time for h/w to get to a quiescent state after reset */
6344 delay(10*1000);
6345
6346 if (sc->sc_type == WM_T_PCH)
6347 wm_hv_phy_workaround_ich8lan(sc);
6348
6349 if (sc->sc_type == WM_T_PCH2)
6350 wm_lv_phy_workaround_ich8lan(sc);
6351
6352 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6353 /*
6354 * dummy read to clear the phy wakeup bit after lcd
6355 * reset
6356 */
6357 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6358 }
6359
6360 /*
6361 * XXX Configure the LCD with the extended configuration region
6362 * in NVM
6363 */
6364
6365 /* Configure the LCD with the OEM bits in NVM */
6366 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6367 || (sc->sc_type == WM_T_PCH_LPT)) {
6368 /*
6369 * Disable LPLU.
6370 * XXX It seems that 82567 has LPLU, too.
6371 */
6372 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6373 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6374 reg |= HV_OEM_BITS_ANEGNOW;
6375 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6376 }
6377 break;
6378 default:
6379 panic("%s: unknown type\n", __func__);
6380 break;
6381 }
6382 }
6383
6384 /*
6385 * wm_gmii_mediainit:
6386 *
6387 * Initialize media for use on 1000BASE-T devices.
6388 */
6389 static void
6390 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6391 {
6392 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6393 struct mii_data *mii = &sc->sc_mii;
6394
6395 /* We have MII. */
6396 sc->sc_flags |= WM_F_HAS_MII;
6397
6398 if (sc->sc_type == WM_T_80003)
6399 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6400 else
6401 sc->sc_tipg = TIPG_1000T_DFLT;
6402
6403 /*
6404 * Let the chip set speed/duplex on its own based on
6405 * signals from the PHY.
6406 * XXXbouyer - I'm not sure this is right for the 80003,
6407 * the em driver only sets CTRL_SLU here - but it seems to work.
6408 */
6409 sc->sc_ctrl |= CTRL_SLU;
6410 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6411
6412 /* Initialize our media structures and probe the GMII. */
6413 mii->mii_ifp = ifp;
6414
6415 /*
6416 * Determine the PHY access method.
6417 *
6418 * For SGMII, use the SGMII specific method.
6419 *
6420 * For some devices, we can determine the PHY access method
6421 * from sc_type.
6422 *
6423 * For ICH8 variants, it's difficult to determine the PHY access
6424 * method by sc_type, so use the PCI product ID for some devices.
6425 * For other ICH8 variants, try to use igp's method. If the PHY
6426 * can't be detected, then use bm's method.
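 *
 * As a rough summary of the dispatch below (read off the switch
 * statement, not taken from documentation):
 *
 * 82577/82578/82579, I217/I218 -> wm_gmii_hv_{read,write}reg
 * 82567 (ICH8/9/10 BM variants) -> wm_gmii_bm_{read,write}reg
 * SGMII -> wm_sgmii_{read,write}reg
 * 80003 -> wm_gmii_i80003_{read,write}reg
 * 82580/I350 -> wm_gmii_82580_{read,write}reg
 * 82544 and newer (incl. I210/I211) -> wm_gmii_i82544_{read,write}reg
 * 82543 -> wm_gmii_i82543_{read,write}reg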
6427 */ 6428 switch (prodid) { 6429 case PCI_PRODUCT_INTEL_PCH_M_LM: 6430 case PCI_PRODUCT_INTEL_PCH_M_LC: 6431 /* 82577 */ 6432 sc->sc_phytype = WMPHY_82577; 6433 mii->mii_readreg = wm_gmii_hv_readreg; 6434 mii->mii_writereg = wm_gmii_hv_writereg; 6435 break; 6436 case PCI_PRODUCT_INTEL_PCH_D_DM: 6437 case PCI_PRODUCT_INTEL_PCH_D_DC: 6438 /* 82578 */ 6439 sc->sc_phytype = WMPHY_82578; 6440 mii->mii_readreg = wm_gmii_hv_readreg; 6441 mii->mii_writereg = wm_gmii_hv_writereg; 6442 break; 6443 case PCI_PRODUCT_INTEL_PCH2_LV_LM: 6444 case PCI_PRODUCT_INTEL_PCH2_LV_V: 6445 /* 82579 */ 6446 sc->sc_phytype = WMPHY_82579; 6447 mii->mii_readreg = wm_gmii_hv_readreg; 6448 mii->mii_writereg = wm_gmii_hv_writereg; 6449 break; 6450 case PCI_PRODUCT_INTEL_I217_LM: 6451 case PCI_PRODUCT_INTEL_I217_V: 6452 case PCI_PRODUCT_INTEL_I218_LM: 6453 case PCI_PRODUCT_INTEL_I218_V: 6454 /* I21[78] */ 6455 mii->mii_readreg = wm_gmii_hv_readreg; 6456 mii->mii_writereg = wm_gmii_hv_writereg; 6457 break; 6458 case PCI_PRODUCT_INTEL_82801I_BM: 6459 case PCI_PRODUCT_INTEL_82801J_R_BM_LM: 6460 case PCI_PRODUCT_INTEL_82801J_R_BM_LF: 6461 case PCI_PRODUCT_INTEL_82801J_D_BM_LM: 6462 case PCI_PRODUCT_INTEL_82801J_D_BM_LF: 6463 case PCI_PRODUCT_INTEL_82801J_R_BM_V: 6464 /* 82567 */ 6465 sc->sc_phytype = WMPHY_BM; 6466 mii->mii_readreg = wm_gmii_bm_readreg; 6467 mii->mii_writereg = wm_gmii_bm_writereg; 6468 break; 6469 default: 6470 if ((sc->sc_flags & WM_F_SGMII) != 0) { 6471 mii->mii_readreg = wm_sgmii_readreg; 6472 mii->mii_writereg = wm_sgmii_writereg; 6473 } else if (sc->sc_type >= WM_T_80003) { 6474 mii->mii_readreg = wm_gmii_i80003_readreg; 6475 mii->mii_writereg = wm_gmii_i80003_writereg; 6476 } else if (sc->sc_type >= WM_T_I210) { 6477 mii->mii_readreg = wm_gmii_i82544_readreg; 6478 mii->mii_writereg = wm_gmii_i82544_writereg; 6479 } else if (sc->sc_type >= WM_T_82580) { 6480 sc->sc_phytype = WMPHY_82580; 6481 mii->mii_readreg = wm_gmii_82580_readreg; 6482 mii->mii_writereg = wm_gmii_82580_writereg; 6483 } else if (sc->sc_type >= WM_T_82544) { 6484 mii->mii_readreg = wm_gmii_i82544_readreg; 6485 mii->mii_writereg = wm_gmii_i82544_writereg; 6486 } else { 6487 mii->mii_readreg = wm_gmii_i82543_readreg; 6488 mii->mii_writereg = wm_gmii_i82543_writereg; 6489 } 6490 break; 6491 } 6492 mii->mii_statchg = wm_gmii_statchg; 6493 6494 wm_gmii_reset(sc); 6495 6496 sc->sc_ethercom.ec_mii = &sc->sc_mii; 6497 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, 6498 wm_gmii_mediastatus); 6499 6500 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 6501 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) 6502 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) 6503 || (sc->sc_type == WM_T_I211)) { 6504 if ((sc->sc_flags & WM_F_SGMII) == 0) { 6505 /* Attach only one port */ 6506 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, 6507 MII_OFFSET_ANY, MIIF_DOPAUSE); 6508 } else { 6509 int i; 6510 uint32_t ctrl_ext; 6511 6512 /* Power on sgmii phy if it is disabled */ 6513 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 6514 CSR_WRITE(sc, WMREG_CTRL_EXT, 6515 ctrl_ext &~ CTRL_EXT_SWDPIN(3)); 6516 CSR_WRITE_FLUSH(sc); 6517 delay(300*1000); /* XXX too long */ 6518 6519 /* from 1 to 8 */ 6520 for (i = 1; i < 8; i++) 6521 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 6522 i, MII_OFFSET_ANY, MIIF_DOPAUSE); 6523 6524 /* restore previous sfp cage power state */ 6525 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 6526 } 6527 } else { 6528 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 6529 
MII_OFFSET_ANY, MIIF_DOPAUSE);
6530 }
6531
6532 /*
6533 * If the MAC is PCH2 or PCH_LPT and fails to detect an MII PHY, call
6534 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6535 */
6536 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6537 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6538 wm_set_mdio_slow_mode_hv(sc);
6539 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6540 MII_OFFSET_ANY, MIIF_DOPAUSE);
6541 }
6542
6543 /*
6544 * (For ICH8 variants)
6545 * If PHY detection failed, use BM's r/w functions and retry.
6546 */
6547 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6548 /* if failed, retry with *_bm_* */
6549 mii->mii_readreg = wm_gmii_bm_readreg;
6550 mii->mii_writereg = wm_gmii_bm_writereg;
6551
6552 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6553 MII_OFFSET_ANY, MIIF_DOPAUSE);
6554 }
6555
6556 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6557 /* No PHY was found */
6558 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6559 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6560 sc->sc_phytype = WMPHY_NONE;
6561 } else {
6562 /*
6563 * PHY found!
6564 * Check the PHY type.
6565 */
6566 uint32_t model;
6567 struct mii_softc *child;
6568
6569 child = LIST_FIRST(&mii->mii_phys);
6570 if (device_is_a(child->mii_dev, "igphy")) {
6571 struct igphy_softc *isc = (struct igphy_softc *)child;
6572
6573 model = isc->sc_mii.mii_mpd_model;
6574 if (model == MII_MODEL_yyINTEL_I82566)
6575 sc->sc_phytype = WMPHY_IGP_3;
6576 }
6577
6578 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6579 }
6580 }
6581
6582 /*
6583 * wm_gmii_mediastatus: [ifmedia interface function]
6584 *
6585 * Get the current interface media status on a 1000BASE-T device.
6586 */
6587 static void
6588 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6589 {
6590 struct wm_softc *sc = ifp->if_softc;
6591
6592 ether_mediastatus(ifp, ifmr);
6593 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6594 | sc->sc_flowflags;
6595 }
6596
6597 /*
6598 * wm_gmii_mediachange: [ifmedia interface function]
6599 *
6600 * Set hardware to newly-selected media on a 1000BASE-T device.
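 *
 * Note that for 82543 and earlier with a fixed media selection the
 * MAC cannot follow the PHY by itself, so the body below forces the
 * CTRL speed/duplex bits to match the chosen media; with
 * autonegotiation (or on newer MACs) the forced bits are cleared
 * instead and the MAC tracks the PHY.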
6601 */ 6602 static int 6603 wm_gmii_mediachange(struct ifnet *ifp) 6604 { 6605 struct wm_softc *sc = ifp->if_softc; 6606 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 6607 int rc; 6608 6609 if ((ifp->if_flags & IFF_UP) == 0) 6610 return 0; 6611 6612 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 6613 sc->sc_ctrl |= CTRL_SLU; 6614 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 6615 || (sc->sc_type > WM_T_82543)) { 6616 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); 6617 } else { 6618 sc->sc_ctrl &= ~CTRL_ASDE; 6619 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 6620 if (ife->ifm_media & IFM_FDX) 6621 sc->sc_ctrl |= CTRL_FD; 6622 switch (IFM_SUBTYPE(ife->ifm_media)) { 6623 case IFM_10_T: 6624 sc->sc_ctrl |= CTRL_SPEED_10; 6625 break; 6626 case IFM_100_TX: 6627 sc->sc_ctrl |= CTRL_SPEED_100; 6628 break; 6629 case IFM_1000_T: 6630 sc->sc_ctrl |= CTRL_SPEED_1000; 6631 break; 6632 default: 6633 panic("wm_gmii_mediachange: bad media 0x%x", 6634 ife->ifm_media); 6635 } 6636 } 6637 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6638 if (sc->sc_type <= WM_T_82543) 6639 wm_gmii_reset(sc); 6640 6641 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 6642 return 0; 6643 return rc; 6644 } 6645 6646 #define MDI_IO CTRL_SWDPIN(2) 6647 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 6648 #define MDI_CLK CTRL_SWDPIN(3) 6649 6650 static void 6651 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 6652 { 6653 uint32_t i, v; 6654 6655 v = CSR_READ(sc, WMREG_CTRL); 6656 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 6657 v |= MDI_DIR | CTRL_SWDPIO(3); 6658 6659 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 6660 if (data & i) 6661 v |= MDI_IO; 6662 else 6663 v &= ~MDI_IO; 6664 CSR_WRITE(sc, WMREG_CTRL, v); 6665 delay(10); 6666 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6667 delay(10); 6668 CSR_WRITE(sc, WMREG_CTRL, v); 6669 delay(10); 6670 } 6671 } 6672 6673 static uint32_t 6674 i82543_mii_recvbits(struct wm_softc *sc) 6675 { 6676 uint32_t v, i, data = 0; 6677 6678 v = CSR_READ(sc, WMREG_CTRL); 6679 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 6680 v |= CTRL_SWDPIO(3); 6681 6682 CSR_WRITE(sc, WMREG_CTRL, v); 6683 delay(10); 6684 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6685 delay(10); 6686 CSR_WRITE(sc, WMREG_CTRL, v); 6687 delay(10); 6688 6689 for (i = 0; i < 16; i++) { 6690 data <<= 1; 6691 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6692 delay(10); 6693 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 6694 data |= 1; 6695 CSR_WRITE(sc, WMREG_CTRL, v); 6696 delay(10); 6697 } 6698 6699 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6700 delay(10); 6701 CSR_WRITE(sc, WMREG_CTRL, v); 6702 delay(10); 6703 6704 return data; 6705 } 6706 6707 #undef MDI_IO 6708 #undef MDI_DIR 6709 #undef MDI_CLK 6710 6711 /* 6712 * wm_gmii_i82543_readreg: [mii interface function] 6713 * 6714 * Read a PHY register on the GMII (i82543 version). 6715 */ 6716 static int 6717 wm_gmii_i82543_readreg(device_t self, int phy, int reg) 6718 { 6719 struct wm_softc *sc = device_private(self); 6720 int rv; 6721 6722 i82543_mii_sendbits(sc, 0xffffffffU, 32); 6723 i82543_mii_sendbits(sc, reg | (phy << 5) | 6724 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 6725 rv = i82543_mii_recvbits(sc) & 0xffff; 6726 6727 DPRINTF(WM_DEBUG_GMII, 6728 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 6729 device_xname(sc->sc_dev), phy, reg, rv)); 6730 6731 return rv; 6732 } 6733 6734 /* 6735 * wm_gmii_i82543_writereg: [mii interface function] 6736 * 6737 * Write a PHY register on the GMII (i82543 version). 
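 *
 * The bit-banged frame assembled below follows the IEEE 802.3 clause
 * 22 MDIO format: a 32-bit preamble of all ones, then (MSB first)
 * the start bits, the write opcode, the 5-bit PHY address, the 5-bit
 * register address, the turnaround bits and 16 bits of data.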
6738 */ 6739 static void 6740 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) 6741 { 6742 struct wm_softc *sc = device_private(self); 6743 6744 i82543_mii_sendbits(sc, 0xffffffffU, 32); 6745 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 6746 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 6747 (MII_COMMAND_START << 30), 32); 6748 } 6749 6750 /* 6751 * wm_gmii_i82544_readreg: [mii interface function] 6752 * 6753 * Read a PHY register on the GMII. 6754 */ 6755 static int 6756 wm_gmii_i82544_readreg(device_t self, int phy, int reg) 6757 { 6758 struct wm_softc *sc = device_private(self); 6759 uint32_t mdic = 0; 6760 int i, rv; 6761 6762 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 6763 MDIC_REGADD(reg)); 6764 6765 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 6766 mdic = CSR_READ(sc, WMREG_MDIC); 6767 if (mdic & MDIC_READY) 6768 break; 6769 delay(50); 6770 } 6771 6772 if ((mdic & MDIC_READY) == 0) { 6773 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", 6774 device_xname(sc->sc_dev), phy, reg); 6775 rv = 0; 6776 } else if (mdic & MDIC_E) { 6777 #if 0 /* This is normal if no PHY is present. */ 6778 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", 6779 device_xname(sc->sc_dev), phy, reg); 6780 #endif 6781 rv = 0; 6782 } else { 6783 rv = MDIC_DATA(mdic); 6784 if (rv == 0xffff) 6785 rv = 0; 6786 } 6787 6788 return rv; 6789 } 6790 6791 /* 6792 * wm_gmii_i82544_writereg: [mii interface function] 6793 * 6794 * Write a PHY register on the GMII. 6795 */ 6796 static void 6797 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) 6798 { 6799 struct wm_softc *sc = device_private(self); 6800 uint32_t mdic = 0; 6801 int i; 6802 6803 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | 6804 MDIC_REGADD(reg) | MDIC_DATA(val)); 6805 6806 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 6807 mdic = CSR_READ(sc, WMREG_MDIC); 6808 if (mdic & MDIC_READY) 6809 break; 6810 delay(50); 6811 } 6812 6813 if ((mdic & MDIC_READY) == 0) 6814 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n", 6815 device_xname(sc->sc_dev), phy, reg); 6816 else if (mdic & MDIC_E) 6817 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n", 6818 device_xname(sc->sc_dev), phy, reg); 6819 } 6820 6821 /* 6822 * wm_gmii_i80003_readreg: [mii interface function] 6823 * 6824 * Read a PHY register on the kumeran 6825 * This could be handled by the PHY layer if we didn't have to lock the 6826 * ressource ... 
6827 */ 6828 static int 6829 wm_gmii_i80003_readreg(device_t self, int phy, int reg) 6830 { 6831 struct wm_softc *sc = device_private(self); 6832 int sem; 6833 int rv; 6834 6835 if (phy != 1) /* only one PHY on kumeran bus */ 6836 return 0; 6837 6838 sem = swfwphysem[sc->sc_funcid]; 6839 if (wm_get_swfw_semaphore(sc, sem)) { 6840 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6841 __func__); 6842 return 0; 6843 } 6844 6845 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 6846 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 6847 reg >> GG82563_PAGE_SHIFT); 6848 } else { 6849 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 6850 reg >> GG82563_PAGE_SHIFT); 6851 } 6852 /* Wait more 200us for a bug of the ready bit in the MDIC register */ 6853 delay(200); 6854 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 6855 delay(200); 6856 6857 wm_put_swfw_semaphore(sc, sem); 6858 return rv; 6859 } 6860 6861 /* 6862 * wm_gmii_i80003_writereg: [mii interface function] 6863 * 6864 * Write a PHY register on the kumeran. 6865 * This could be handled by the PHY layer if we didn't have to lock the 6866 * ressource ... 6867 */ 6868 static void 6869 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val) 6870 { 6871 struct wm_softc *sc = device_private(self); 6872 int sem; 6873 6874 if (phy != 1) /* only one PHY on kumeran bus */ 6875 return; 6876 6877 sem = swfwphysem[sc->sc_funcid]; 6878 if (wm_get_swfw_semaphore(sc, sem)) { 6879 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6880 __func__); 6881 return; 6882 } 6883 6884 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 6885 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 6886 reg >> GG82563_PAGE_SHIFT); 6887 } else { 6888 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 6889 reg >> GG82563_PAGE_SHIFT); 6890 } 6891 /* Wait more 200us for a bug of the ready bit in the MDIC register */ 6892 delay(200); 6893 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 6894 delay(200); 6895 6896 wm_put_swfw_semaphore(sc, sem); 6897 } 6898 6899 /* 6900 * wm_gmii_bm_readreg: [mii interface function] 6901 * 6902 * Read a PHY register on the kumeran 6903 * This could be handled by the PHY layer if we didn't have to lock the 6904 * ressource ... 6905 */ 6906 static int 6907 wm_gmii_bm_readreg(device_t self, int phy, int reg) 6908 { 6909 struct wm_softc *sc = device_private(self); 6910 int sem; 6911 int rv; 6912 6913 sem = swfwphysem[sc->sc_funcid]; 6914 if (wm_get_swfw_semaphore(sc, sem)) { 6915 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6916 __func__); 6917 return 0; 6918 } 6919 6920 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 6921 if (phy == 1) 6922 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT, 6923 reg); 6924 else 6925 wm_gmii_i82544_writereg(self, phy, 6926 GG82563_PHY_PAGE_SELECT, 6927 reg >> GG82563_PAGE_SHIFT); 6928 } 6929 6930 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 6931 wm_put_swfw_semaphore(sc, sem); 6932 return rv; 6933 } 6934 6935 /* 6936 * wm_gmii_bm_writereg: [mii interface function] 6937 * 6938 * Write a PHY register on the kumeran. 6939 * This could be handled by the PHY layer if we didn't have to lock the 6940 * ressource ... 
6941 */ 6942 static void 6943 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val) 6944 { 6945 struct wm_softc *sc = device_private(self); 6946 int sem; 6947 6948 sem = swfwphysem[sc->sc_funcid]; 6949 if (wm_get_swfw_semaphore(sc, sem)) { 6950 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6951 __func__); 6952 return; 6953 } 6954 6955 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 6956 if (phy == 1) 6957 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT, 6958 reg); 6959 else 6960 wm_gmii_i82544_writereg(self, phy, 6961 GG82563_PHY_PAGE_SELECT, 6962 reg >> GG82563_PAGE_SHIFT); 6963 } 6964 6965 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 6966 wm_put_swfw_semaphore(sc, sem); 6967 } 6968 6969 static void 6970 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd) 6971 { 6972 struct wm_softc *sc = device_private(self); 6973 uint16_t regnum = BM_PHY_REG_NUM(offset); 6974 uint16_t wuce; 6975 6976 /* XXX Gig must be disabled for MDIO accesses to page 800 */ 6977 if (sc->sc_type == WM_T_PCH) { 6978 /* XXX e1000 driver do nothing... why? */ 6979 } 6980 6981 /* Set page 769 */ 6982 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 6983 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); 6984 6985 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG); 6986 6987 wuce &= ~BM_WUC_HOST_WU_BIT; 6988 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, 6989 wuce | BM_WUC_ENABLE_BIT); 6990 6991 /* Select page 800 */ 6992 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 6993 BM_WUC_PAGE << BME1000_PAGE_SHIFT); 6994 6995 /* Write page 800 */ 6996 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); 6997 6998 if (rd) 6999 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE); 7000 else 7001 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); 7002 7003 /* Set page 769 */ 7004 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7005 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); 7006 7007 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce); 7008 } 7009 7010 /* 7011 * wm_gmii_hv_readreg: [mii interface function] 7012 * 7013 * Read a PHY register on the kumeran 7014 * This could be handled by the PHY layer if we didn't have to lock the 7015 * ressource ... 
7016 */ 7017 static int 7018 wm_gmii_hv_readreg(device_t self, int phy, int reg) 7019 { 7020 struct wm_softc *sc = device_private(self); 7021 uint16_t page = BM_PHY_REG_PAGE(reg); 7022 uint16_t regnum = BM_PHY_REG_NUM(reg); 7023 uint16_t val; 7024 int rv; 7025 7026 if (wm_get_swfwhw_semaphore(sc)) { 7027 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7028 __func__); 7029 return 0; 7030 } 7031 7032 /* XXX Workaround failure in MDIO access while cable is disconnected */ 7033 if (sc->sc_phytype == WMPHY_82577) { 7034 /* XXX must write */ 7035 } 7036 7037 /* Page 800 works differently than the rest so it has its own func */ 7038 if (page == BM_WUC_PAGE) { 7039 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1); 7040 return val; 7041 } 7042 7043 /* 7044 * Lower than page 768 works differently than the rest so it has its 7045 * own func 7046 */ 7047 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { 7048 printf("gmii_hv_readreg!!!\n"); 7049 return 0; 7050 } 7051 7052 if (regnum > BME1000_MAX_MULTI_PAGE_REG) { 7053 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7054 page << BME1000_PAGE_SHIFT); 7055 } 7056 7057 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR); 7058 wm_put_swfwhw_semaphore(sc); 7059 return rv; 7060 } 7061 7062 /* 7063 * wm_gmii_hv_writereg: [mii interface function] 7064 * 7065 * Write a PHY register on the kumeran. 7066 * This could be handled by the PHY layer if we didn't have to lock the 7067 * ressource ... 7068 */ 7069 static void 7070 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val) 7071 { 7072 struct wm_softc *sc = device_private(self); 7073 uint16_t page = BM_PHY_REG_PAGE(reg); 7074 uint16_t regnum = BM_PHY_REG_NUM(reg); 7075 7076 if (wm_get_swfwhw_semaphore(sc)) { 7077 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7078 __func__); 7079 return; 7080 } 7081 7082 /* XXX Workaround failure in MDIO access while cable is disconnected */ 7083 7084 /* Page 800 works differently than the rest so it has its own func */ 7085 if (page == BM_WUC_PAGE) { 7086 uint16_t tmp; 7087 7088 tmp = val; 7089 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0); 7090 return; 7091 } 7092 7093 /* 7094 * Lower than page 768 works differently than the rest so it has its 7095 * own func 7096 */ 7097 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { 7098 printf("gmii_hv_writereg!!!\n"); 7099 return; 7100 } 7101 7102 /* 7103 * XXX Workaround MDIO accesses being disabled after entering IEEE 7104 * Power Down (whenever bit 11 of the PHY control register is set) 7105 */ 7106 7107 if (regnum > BME1000_MAX_MULTI_PAGE_REG) { 7108 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7109 page << BME1000_PAGE_SHIFT); 7110 } 7111 7112 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val); 7113 wm_put_swfwhw_semaphore(sc); 7114 } 7115 7116 /* 7117 * wm_sgmii_readreg: [mii interface function] 7118 * 7119 * Read a PHY register on the SGMII 7120 * This could be handled by the PHY layer if we didn't have to lock the 7121 * ressource ... 
7122 */ 7123 static int 7124 wm_sgmii_readreg(device_t self, int phy, int reg) 7125 { 7126 struct wm_softc *sc = device_private(self); 7127 uint32_t i2ccmd; 7128 int i, rv; 7129 7130 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) { 7131 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7132 __func__); 7133 return 0; 7134 } 7135 7136 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) 7137 | (phy << I2CCMD_PHY_ADDR_SHIFT) 7138 | I2CCMD_OPCODE_READ; 7139 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 7140 7141 /* Poll the ready bit */ 7142 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 7143 delay(50); 7144 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 7145 if (i2ccmd & I2CCMD_READY) 7146 break; 7147 } 7148 if ((i2ccmd & I2CCMD_READY) == 0) 7149 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n"); 7150 if ((i2ccmd & I2CCMD_ERROR) != 0) 7151 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n"); 7152 7153 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00); 7154 7155 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 7156 return rv; 7157 } 7158 7159 /* 7160 * wm_sgmii_writereg: [mii interface function] 7161 * 7162 * Write a PHY register on the SGMII. 7163 * This could be handled by the PHY layer if we didn't have to lock the 7164 * ressource ... 7165 */ 7166 static void 7167 wm_sgmii_writereg(device_t self, int phy, int reg, int val) 7168 { 7169 struct wm_softc *sc = device_private(self); 7170 uint32_t i2ccmd; 7171 int i; 7172 7173 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) { 7174 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7175 __func__); 7176 return; 7177 } 7178 7179 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) 7180 | (phy << I2CCMD_PHY_ADDR_SHIFT) 7181 | I2CCMD_OPCODE_WRITE; 7182 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 7183 7184 /* Poll the ready bit */ 7185 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 7186 delay(50); 7187 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 7188 if (i2ccmd & I2CCMD_READY) 7189 break; 7190 } 7191 if ((i2ccmd & I2CCMD_READY) == 0) 7192 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n"); 7193 if ((i2ccmd & I2CCMD_ERROR) != 0) 7194 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n"); 7195 7196 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); 7197 } 7198 7199 /* 7200 * wm_gmii_82580_readreg: [mii interface function] 7201 * 7202 * Read a PHY register on the 82580 and I350. 7203 * This could be handled by the PHY layer if we didn't have to lock the 7204 * ressource ... 7205 */ 7206 static int 7207 wm_gmii_82580_readreg(device_t self, int phy, int reg) 7208 { 7209 struct wm_softc *sc = device_private(self); 7210 int sem; 7211 int rv; 7212 7213 sem = swfwphysem[sc->sc_funcid]; 7214 if (wm_get_swfw_semaphore(sc, sem)) { 7215 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7216 __func__); 7217 return 0; 7218 } 7219 7220 rv = wm_gmii_i82544_readreg(self, phy, reg); 7221 7222 wm_put_swfw_semaphore(sc, sem); 7223 return rv; 7224 } 7225 7226 /* 7227 * wm_gmii_82580_writereg: [mii interface function] 7228 * 7229 * Write a PHY register on the 82580 and I350. 7230 * This could be handled by the PHY layer if we didn't have to lock the 7231 * ressource ... 
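 *
 * All of the *_readreg/*_writereg variants above share one pattern:
 * take the appropriate software/firmware semaphore, perform the raw
 * MDIC (or I2CCMD) access, then release the semaphore, so the driver
 * and the firmware never race on the PHY.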
7232 */ 7233 static void 7234 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val) 7235 { 7236 struct wm_softc *sc = device_private(self); 7237 int sem; 7238 7239 sem = swfwphysem[sc->sc_funcid]; 7240 if (wm_get_swfw_semaphore(sc, sem)) { 7241 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7242 __func__); 7243 return; 7244 } 7245 7246 wm_gmii_i82544_writereg(self, phy, reg, val); 7247 7248 wm_put_swfw_semaphore(sc, sem); 7249 } 7250 7251 /* 7252 * wm_gmii_statchg: [mii interface function] 7253 * 7254 * Callback from MII layer when media changes. 7255 */ 7256 static void 7257 wm_gmii_statchg(struct ifnet *ifp) 7258 { 7259 struct wm_softc *sc = ifp->if_softc; 7260 struct mii_data *mii = &sc->sc_mii; 7261 7262 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 7263 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 7264 sc->sc_fcrtl &= ~FCRTL_XONE; 7265 7266 /* 7267 * Get flow control negotiation result. 7268 */ 7269 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 7270 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 7271 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 7272 mii->mii_media_active &= ~IFM_ETH_FMASK; 7273 } 7274 7275 if (sc->sc_flowflags & IFM_FLOW) { 7276 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { 7277 sc->sc_ctrl |= CTRL_TFCE; 7278 sc->sc_fcrtl |= FCRTL_XONE; 7279 } 7280 if (sc->sc_flowflags & IFM_ETH_RXPAUSE) 7281 sc->sc_ctrl |= CTRL_RFCE; 7282 } 7283 7284 if (sc->sc_mii.mii_media_active & IFM_FDX) { 7285 DPRINTF(WM_DEBUG_LINK, 7286 ("%s: LINK: statchg: FDX\n", ifp->if_xname)); 7287 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 7288 } else { 7289 DPRINTF(WM_DEBUG_LINK, 7290 ("%s: LINK: statchg: HDX\n", ifp->if_xname)); 7291 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 7292 } 7293 7294 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 7295 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 7296 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 
WMREG_OLD_FCRTL
7297 : WMREG_FCRTL, sc->sc_fcrtl);
7298 if (sc->sc_type == WM_T_80003) {
7299 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7300 case IFM_1000_T:
7301 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7302 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7303 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7304 break;
7305 default:
7306 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7307 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7308 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7309 break;
7310 }
7311 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7312 }
7313 }
7314
7315 /*
7316 * wm_kmrn_readreg:
7317 *
7318 * Read a Kumeran register
7319 */
7320 static int
7321 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7322 {
7323 int rv;
7324
7325 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7326 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7327 aprint_error_dev(sc->sc_dev,
7328 "%s: failed to get semaphore\n", __func__);
7329 return 0;
7330 }
7331 } else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7332 if (wm_get_swfwhw_semaphore(sc)) {
7333 aprint_error_dev(sc->sc_dev,
7334 "%s: failed to get semaphore\n", __func__);
7335 return 0;
7336 }
7337 }
7338
7339 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7340 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7341 KUMCTRLSTA_REN);
7342 delay(2);
7343
7344 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7345
7346 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7347 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7348 else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7349 wm_put_swfwhw_semaphore(sc);
7350
7351 return rv;
7352 }
7353
7354 /*
7355 * wm_kmrn_writereg:
7356 *
7357 * Write a Kumeran register
7358 */
7359 static void
7360 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7361 {
7362
7363 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7364 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7365 aprint_error_dev(sc->sc_dev,
7366 "%s: failed to get semaphore\n", __func__);
7367 return;
7368 }
7369 } else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7370 if (wm_get_swfwhw_semaphore(sc)) {
7371 aprint_error_dev(sc->sc_dev,
7372 "%s: failed to get semaphore\n", __func__);
7373 return;
7374 }
7375 }
7376
7377 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7378 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7379 (val & KUMCTRLSTA_MASK));
7380
7381 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7382 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7383 else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7384 wm_put_swfwhw_semaphore(sc);
7385 }
7386
7387 static int
7388 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7389 {
7390 uint32_t eecd = 0;
7391
7392 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7393 || sc->sc_type == WM_T_82583) {
7394 eecd = CSR_READ(sc, WMREG_EECD);
7395
7396 /* Isolate bits 15 & 16 */
7397 eecd = ((eecd >> 15) & 0x03);
7398
7399 /* If both bits are set, device is Flash type */
7400 if (eecd == 0x03)
7401 return 0;
7402 }
7403 return 1;
7404 }
7405
7406 static int
7407 wm_get_swsm_semaphore(struct wm_softc *sc)
7408 {
7409 int32_t timeout;
7410 uint32_t swsm;
7411
7412 /* Get the FW semaphore. */
7413 timeout = 1000 + 1; /* XXX */
7414 while (timeout) {
7415 swsm = CSR_READ(sc, WMREG_SWSM);
7416 swsm |= SWSM_SWESMBI;
7417 CSR_WRITE(sc, WMREG_SWSM, swsm);
7418 /* If we managed to set the bit, we got the semaphore.
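 * (While the firmware side owns the semaphore, SWESMBI refuses to
 * latch, so this write-then-read-back acts as an atomic test-and-set.)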
*/ 7419 swsm = CSR_READ(sc, WMREG_SWSM); 7420 if (swsm & SWSM_SWESMBI) 7421 break; 7422 7423 delay(50); 7424 timeout--; 7425 } 7426 7427 if (timeout == 0) { 7428 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n"); 7429 /* Release semaphores */ 7430 wm_put_swsm_semaphore(sc); 7431 return 1; 7432 } 7433 return 0; 7434 } 7435 7436 static void 7437 wm_put_swsm_semaphore(struct wm_softc *sc) 7438 { 7439 uint32_t swsm; 7440 7441 swsm = CSR_READ(sc, WMREG_SWSM); 7442 swsm &= ~(SWSM_SWESMBI); 7443 CSR_WRITE(sc, WMREG_SWSM, swsm); 7444 } 7445 7446 static int 7447 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 7448 { 7449 uint32_t swfw_sync; 7450 uint32_t swmask = mask << SWFW_SOFT_SHIFT; 7451 uint32_t fwmask = mask << SWFW_FIRM_SHIFT; 7452 int timeout = 200; 7453 7454 for (timeout = 0; timeout < 200; timeout++) { 7455 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 7456 if (wm_get_swsm_semaphore(sc)) { 7457 aprint_error_dev(sc->sc_dev, 7458 "%s: failed to get semaphore\n", 7459 __func__); 7460 return 1; 7461 } 7462 } 7463 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 7464 if ((swfw_sync & (swmask | fwmask)) == 0) { 7465 swfw_sync |= swmask; 7466 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 7467 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7468 wm_put_swsm_semaphore(sc); 7469 return 0; 7470 } 7471 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7472 wm_put_swsm_semaphore(sc); 7473 delay(5000); 7474 } 7475 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n", 7476 device_xname(sc->sc_dev), mask, swfw_sync); 7477 return 1; 7478 } 7479 7480 static void 7481 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 7482 { 7483 uint32_t swfw_sync; 7484 7485 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 7486 while (wm_get_swsm_semaphore(sc) != 0) 7487 continue; 7488 } 7489 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 7490 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); 7491 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 7492 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7493 wm_put_swsm_semaphore(sc); 7494 } 7495 7496 static int 7497 wm_get_swfwhw_semaphore(struct wm_softc *sc) 7498 { 7499 uint32_t ext_ctrl; 7500 int timeout = 200; 7501 7502 for (timeout = 0; timeout < 200; timeout++) { 7503 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7504 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 7505 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 7506 7507 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7508 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 7509 return 0; 7510 delay(5000); 7511 } 7512 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n", 7513 device_xname(sc->sc_dev), ext_ctrl); 7514 return 1; 7515 } 7516 7517 static void 7518 wm_put_swfwhw_semaphore(struct wm_softc *sc) 7519 { 7520 uint32_t ext_ctrl; 7521 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7522 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 7523 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 7524 } 7525 7526 static int 7527 wm_get_hw_semaphore_82573(struct wm_softc *sc) 7528 { 7529 int i = 0; 7530 uint32_t reg; 7531 7532 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7533 do { 7534 CSR_WRITE(sc, WMREG_EXTCNFCTR, 7535 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); 7536 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7537 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) 7538 break; 7539 delay(2*1000); 7540 i++; 7541 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); 7542 7543 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) { 7544 wm_put_hw_semaphore_82573(sc); 7545 log(LOG_ERR, "%s: Driver can't access the PHY\n", 7546 device_xname(sc->sc_dev)); 7547 return -1; 7548 } 7549 7550 return 0; 7551 } 7552 7553 static void 7554 
wm_put_hw_semaphore_82573(struct wm_softc *sc) 7555 { 7556 uint32_t reg; 7557 7558 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7559 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 7560 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 7561 } 7562 7563 static int 7564 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) 7565 { 7566 uint32_t eecd; 7567 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; 7568 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); 7569 uint8_t sig_byte = 0; 7570 7571 switch (sc->sc_type) { 7572 case WM_T_ICH8: 7573 case WM_T_ICH9: 7574 eecd = CSR_READ(sc, WMREG_EECD); 7575 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) { 7576 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0; 7577 return 0; 7578 } 7579 /* FALLTHROUGH */ 7580 default: 7581 /* Default to 0 */ 7582 *bank = 0; 7583 7584 /* Check bank 0 */ 7585 wm_read_ich8_byte(sc, act_offset, &sig_byte); 7586 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 7587 *bank = 0; 7588 return 0; 7589 } 7590 7591 /* Check bank 1 */ 7592 wm_read_ich8_byte(sc, act_offset + bank1_offset, 7593 &sig_byte); 7594 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 7595 *bank = 1; 7596 return 0; 7597 } 7598 } 7599 7600 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n", 7601 device_xname(sc->sc_dev))); 7602 return -1; 7603 } 7604 7605 /****************************************************************************** 7606 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 7607 * register. 7608 * 7609 * sc - Struct containing variables accessed by shared code 7610 * offset - offset of word in the EEPROM to read 7611 * data - word read from the EEPROM 7612 * words - number of words to read 7613 *****************************************************************************/ 7614 static int 7615 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) 7616 { 7617 int32_t error = 0; 7618 uint32_t flash_bank = 0; 7619 uint32_t act_offset = 0; 7620 uint32_t bank_offset = 0; 7621 uint16_t word = 0; 7622 uint16_t i = 0; 7623 7624 /* We need to know which is the valid flash bank. In the event 7625 * that we didn't allocate eeprom_shadow_ram, we may not be 7626 * managing flash_bank. So it cannot be trusted and needs 7627 * to be updated with each read. 7628 */ 7629 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank); 7630 if (error) { 7631 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n", 7632 __func__); 7633 flash_bank = 0; 7634 } 7635 7636 /* 7637 * Adjust offset appropriately if we're on bank 1 - adjust for word 7638 * size 7639 */ 7640 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 7641 7642 error = wm_get_swfwhw_semaphore(sc); 7643 if (error) { 7644 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7645 __func__); 7646 return error; 7647 } 7648 7649 for (i = 0; i < words; i++) { 7650 /* The NVM part needs a byte offset, hence * 2 */ 7651 act_offset = bank_offset + ((offset + i) * 2); 7652 error = wm_read_ich8_word(sc, act_offset, &word); 7653 if (error) { 7654 aprint_error_dev(sc->sc_dev, 7655 "%s: failed to read NVM\n", __func__); 7656 break; 7657 } 7658 data[i] = word; 7659 } 7660 7661 wm_put_swfwhw_semaphore(sc); 7662 return error; 7663 } 7664 7665 /****************************************************************************** 7666 * This function does initial flash setup so that a new read/write/erase cycle 7667 * can be started. 
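 * Roughly: clear any stale FCERR/DAEL error bits (write-1-to-clear),
 * then either start at once if no flash cycle is in progress, or poll
 * briefly for the running cycle to finish before claiming the
 * controller.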
7668 *
7669 * sc - The pointer to the hw structure
7670 ****************************************************************************/
7671 static int32_t
7672 wm_ich8_cycle_init(struct wm_softc *sc)
7673 {
7674 uint16_t hsfsts;
7675 int32_t error = 1;
7676 int32_t i = 0;
7677
7678 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7679
7680 /* Maybe check the Flash Descriptor Valid bit in HW status */
7681 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7682 return error;
7683 }
7684
7685 /* Clear FCERR in HW status by writing 1 */
7686 /* Clear DAEL in HW status by writing 1 */
7687 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7688
7689 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7690
7691 /*
7692 * Either we should have a hardware SPI cycle-in-progress bit to check
7693 * against in order to start a new cycle, or the FDONE bit should be
7694 * changed in the hardware so that it reads as 1 after a hardware
7695 * reset, which could then be used to tell whether a cycle is in
7696 * progress or has completed. We should also have some software
7697 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
7698 * that accesses by two threads are serialized, or some way to keep
7699 * two threads from starting a cycle at the same time.
7700 */
7701
7702 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7703 /*
7704 * There is no cycle running at present, so we can start a
7705 * cycle
7706 */
7707
7708 /* Begin by setting Flash Cycle Done. */
7709 hsfsts |= HSFSTS_DONE;
7710 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7711 error = 0;
7712 } else {
7713 /*
7714 * Otherwise poll for some time so the current cycle has a
7715 * chance to end before giving up.
7716 */
7717 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7718 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7719 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7720 error = 0;
7721 break;
7722 }
7723 delay(1);
7724 }
7725 if (error == 0) {
7726 /*
7727 * The previous cycle ended before we gave up;
7728 * now set the Flash Cycle Done.
7729 */
7730 hsfsts |= HSFSTS_DONE;
7731 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7732 }
7733 }
7734 return error;
7735 }
7736
7737 /******************************************************************************
7738 * This function starts a flash cycle and waits for its completion
7739 *
7740 * sc - The pointer to the hw structure
7741 ****************************************************************************/
7742 static int32_t
7743 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7744 {
7745 uint16_t hsflctl;
7746 uint16_t hsfsts;
7747 int32_t error = 1;
7748 uint32_t i = 0;
7749
7750 /* Start a cycle by writing 1 in Flash Cycle Go in HW Flash Control */
7751 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7752 hsflctl |= HSFCTL_GO;
7753 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7754
7755 /* Wait until the FDONE bit is set to 1 */
7756 do {
7757 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7758 if (hsfsts & HSFSTS_DONE)
7759 break;
7760 delay(1);
7761 i++;
7762 } while (i < timeout);
7763 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
7764 error = 0;
7765
7766 return error;
7767 }
7768
7769 /******************************************************************************
7770 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7771 *
7772 * sc - The pointer to the hw structure
7773 * index - The index of the byte or word to read.
7774 * size - Size of data to read, 1=byte 2=word 7775 * data - Pointer to the word to store the value read. 7776 *****************************************************************************/ 7777 static int32_t 7778 wm_read_ich8_data(struct wm_softc *sc, uint32_t index, 7779 uint32_t size, uint16_t* data) 7780 { 7781 uint16_t hsfsts; 7782 uint16_t hsflctl; 7783 uint32_t flash_linear_address; 7784 uint32_t flash_data = 0; 7785 int32_t error = 1; 7786 int32_t count = 0; 7787 7788 if (size < 1 || size > 2 || data == 0x0 || 7789 index > ICH_FLASH_LINEAR_ADDR_MASK) 7790 return error; 7791 7792 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + 7793 sc->sc_ich8_flash_base; 7794 7795 do { 7796 delay(1); 7797 /* Steps */ 7798 error = wm_ich8_cycle_init(sc); 7799 if (error) 7800 break; 7801 7802 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 7803 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 7804 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) 7805 & HSFCTL_BCOUNT_MASK; 7806 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT; 7807 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 7808 7809 /* 7810 * Write the last 24 bits of index into Flash Linear address 7811 * field in Flash Address 7812 */ 7813 /* TODO: TBD maybe check the index against the size of flash */ 7814 7815 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address); 7816 7817 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT); 7818 7819 /* 7820 * Check if FCERR is set to 1, if set to 1, clear it and try 7821 * the whole sequence a few more times, else read in (shift in) 7822 * the Flash Data0, the order is least significant byte first 7823 * msb to lsb 7824 */ 7825 if (error == 0) { 7826 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0); 7827 if (size == 1) 7828 *data = (uint8_t)(flash_data & 0x000000FF); 7829 else if (size == 2) 7830 *data = (uint16_t)(flash_data & 0x0000FFFF); 7831 break; 7832 } else { 7833 /* 7834 * If we've gotten here, then things are probably 7835 * completely hosed, but if the error condition is 7836 * detected, it won't hurt to give it another try... 7837 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 7838 */ 7839 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 7840 if (hsfsts & HSFSTS_ERR) { 7841 /* Repeat for some time before giving up. */ 7842 continue; 7843 } else if ((hsfsts & HSFSTS_DONE) == 0) 7844 break; 7845 } 7846 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 7847 7848 return error; 7849 } 7850 7851 /****************************************************************************** 7852 * Reads a single byte from the NVM using the ICH8 flash access registers. 7853 * 7854 * sc - pointer to wm_hw structure 7855 * index - The index of the byte to read. 7856 * data - Pointer to a byte to store the value read. 7857 *****************************************************************************/ 7858 static int32_t 7859 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data) 7860 { 7861 int32_t status; 7862 uint16_t word = 0; 7863 7864 status = wm_read_ich8_data(sc, index, 1, &word); 7865 if (status == 0) 7866 *data = (uint8_t)word; 7867 else 7868 *data = 0; 7869 7870 return status; 7871 } 7872 7873 /****************************************************************************** 7874 * Reads a word from the NVM using the ICH8 flash access registers. 7875 * 7876 * sc - pointer to wm_hw structure 7877 * index - The starting byte index of the word to read. 7878 * data - Pointer to a word to store the value read. 
/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to the wm_softc structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}

static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);

	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
		device_xname(sc->sc_dev), manc));
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
		uint16_t data;

		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
			device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & EEPROM_CFG2_MNGM_MASK)
			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}
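/*
 * Illustrative sketch only: the manageability checks above all reduce
 * to extracting the firmware mode field from FWSM and comparing it
 * with a mode constant such as MNG_ICH_IAMT_MODE or MNG_IAMT_MODE.
 * A hypothetical helper (not used by this driver) would look like:
 */
#if 0
static uint32_t
wm_fwsm_mode(struct wm_softc *sc)
{

	/* Extract the MODE field from the firmware status register. */
	return (CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK)
	    >> FWSM_MODE_SHIFT;
}
#endif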
static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
		break;
	default:
		/* no problem */
		break;
	}

	return 0;
}

static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}
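/*
 * Illustrative sketch only: wm_get_hw_control() and
 * wm_release_hw_control() are intended to be paired, so the DRV_LOAD
 * bit tells the management firmware (e.g. AMT) whether the host driver
 * currently owns the hardware.  Roughly, from within driver code:
 */
#if 0
	wm_get_hw_control(sc);		/* driver takes ownership */
	/* ... normal operation ... */
	wm_release_hw_control(sc);	/* firmware may manage the NIC */
#endif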
/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)));

	/*
	 * SWDPIN LU RXCW
	 *      0  0    0
	 *      0  0    1	(should not happen)
	 *      0  1    0	(should not happen)
	 *      0  1    1	(should not happen)
	 *      1  0    0	Disable autonegotiation and force link up
	 *      1  0    1	got /C/ but not linkup yet
	 *      1  1    0	(linkup)
	 *      1  1    1	If IFM_AUTO, back to autonegotiation
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and full-duplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware may have updated CTRL's TFCE and RFCE
		 * bits automatically, so keep sc->sc_ctrl in sync.
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonegotiation\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
/* Work-around for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link is other than 1Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

 out:
	return;
}

/* WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

#ifdef WM_WOL
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
#endif /* WM_WOL */
/*
 * Workaround for pch's PHYs
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled while the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}

static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}

static void
wm_set_pcie_completion_timeout(struct wm_softc *sc)
{
	uint32_t gcr;
	pcireg_t ctrl2;

	gcr = CSR_READ(sc, WMREG_GCR);

	/* Only take action if the timeout value is defaulted to 0 */
	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
		goto out;

	if ((gcr & GCR_CAP_VER2) == 0) {
		gcr |= GCR_CMPL_TMOUT_10MS;
		goto out;
	}

	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2);
	ctrl2 |= WM_PCIE_DCSR2_16MS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);

 out:
	/* Disable completion timeout resend */
	gcr &= ~GCR_CMPL_TMOUT_RESEND;

	CSR_WRITE(sc, WMREG_GCR, gcr);
}
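/*
 * Illustrative sketch only: the completion-timeout programming done by
 * wm_set_pcie_completion_timeout() could be verified by reading DCSR2
 * back, under the same assumptions as above (a version-2 PCIe
 * capability at sc_pcixe_capoff):
 */
#if 0
	pcireg_t dcsr2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2);

	if ((dcsr2 & WM_PCIE_DCSR2_16MS) == 0)
		log(LOG_DEBUG, "%s: completion timeout not applied\n",
		    device_xname(sc->sc_dev));
#endif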
/* Special case - the 82575 needs manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * It is the same setup as mentioned in the FreeBSD driver for
	 * the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}
static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flags are set after the EEPROM settings have
	 * been reset.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg, pmreg;
	pcireg_t pmode;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
		|| (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

static bool
wm_suspend(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_release_manageability(sc);
	wm_release_hw_control(sc);
#ifdef WM_WOL
	wm_enable_wakeup(sc);
#endif

	return true;
}
static bool
wm_resume(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_init_manageability(sc);

	return true;
}

static void
wm_set_eee_i350(struct wm_softc *sc)
{
	uint32_t ipcnfg, eeer;

	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
	eeer = CSR_READ(sc, WMREG_EEER);

	if ((sc->sc_flags & WM_F_EEE) != 0) {
		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	} else {
		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
		    | EEER_LPI_FC);
	}

	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
	CSR_WRITE(sc, WMREG_EEER, eeer);
	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
}
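/*
 * Illustrative sketch only: a hypothetical handler that toggles Energy
 * Efficient Ethernet on an I350 would flip WM_F_EEE and then call
 * wm_set_eee_i350() to push the new state to IPCNFG/EEER ("enable"
 * below is a hypothetical variable):
 */
#if 0
	if (enable)
		sc->sc_flags |= WM_F_EEE;
	else
		sc->sc_flags &= ~WM_F_EEE;
	wm_set_eee_i350(sc);
#endif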