/*	$NetBSD: if_wm.c,v 1.262 2013/07/16 10:30:27 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.262 2013/07/16 10:30:27 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
#define	WM_DEBUG_MANAGE	0x10
#define	WM_DEBUG_NVM	0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
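/*
 * Illustrative sketch, not part of the driver: WM_NTXDESC(sc) is always
 * a power of two, so WM_NEXTTX() above wraps the ring index with a mask
 * instead of a modulo.  E.g. with 4096 descriptors, WM_NEXTTX(sc, 4095)
 * evaluates to 0, back at the start of the ring.  Hypothetical usage:
 */
#if 0
	/* Walk the 'n' descriptors of a packet starting at ring index 'i'. */
	for (; n > 0; n--, i = WM_NEXTTX(sc, i)) {
		/* ... fill sc->sc_txdescs[i] ... */
	}
#endif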
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t	 wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t	swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
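/*
 * Illustrative sketch, not part of the driver: swfwphysem[] maps the
 * chip's function ID to the software/firmware semaphore bit protecting
 * that port's PHY.  A hypothetical PHY access would bracket the register
 * traffic with the semaphore helpers declared further below:
 */
#if 0
	uint16_t sem = swfwphysem[sc->sc_funcid];

	if (wm_get_swfw_semaphore(sc, sem))
		return 0;	/* semaphore not acquired */
	/* ... access the PHY registers ... */
	wm_put_swfw_semaphore(sc, sem);
#endif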
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
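/*
 * Illustrative sketch, not part of the driver: PCI writes are posted,
 * so a CSR_WRITE() can still be buffered in a bridge when the CPU moves
 * on.  CSR_WRITE_FLUSH() reads WMREG_STATUS, which forces any posted
 * writes out to the chip first.  A hypothetical reset sequence (CTRL_RST
 * assumed from if_wmreg.h) would look like:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* push the write out to the chip ... */
	delay(10000);		/* ... before waiting for the reset */
#endif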
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC(sc)) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC(sc) - __x), (ops)); \
		__n -= (WM_NTXDESC(sc) - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)
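/*
 * Worked example for WM_CDTXSYNC() above, assuming a 4096-entry ring:
 * syncing 4 descriptors starting at index 4094 wraps, so the macro
 * issues two bus_dmamap_sync() calls -- one for descriptors 4094-4095
 * at the end of the ring, then one for descriptors 0-1 at the start.
 */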
#define	WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU.  On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
 \
	wm_set_dma_addr(&__rxd->wrx_addr, \
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
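/*
 * Worked example for the "scoot" above: with sc_align_tweak == 2, the
 * 14-byte Ethernet header occupies buffer offsets 2-15, so the IP
 * header starts at offset 16, a 4-byte boundary.  This only works
 * while the frame fits in (2K - 2) bytes of the fixed 2K buffer.
 */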
static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
LAN Controller", 922 WM_T_ICH10, WMP_F_1000T }, 923 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE, 924 "HANKSVILLE LAN Controller", 925 WM_T_ICH10, WMP_F_1000T }, 926 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM, 927 "PCH LAN (82577LM) Controller", 928 WM_T_PCH, WMP_F_1000T }, 929 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC, 930 "PCH LAN (82577LC) Controller", 931 WM_T_PCH, WMP_F_1000T }, 932 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM, 933 "PCH LAN (82578DM) Controller", 934 WM_T_PCH, WMP_F_1000T }, 935 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC, 936 "PCH LAN (82578DC) Controller", 937 WM_T_PCH, WMP_F_1000T }, 938 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM, 939 "PCH2 LAN (82579LM) Controller", 940 WM_T_PCH2, WMP_F_1000T }, 941 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V, 942 "PCH2 LAN (82579V) Controller", 943 WM_T_PCH2, WMP_F_1000T }, 944 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER, 945 "82575EB dual-1000baseT Ethernet", 946 WM_T_82575, WMP_F_1000T }, 947 #if 0 948 /* 949 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so 950 * disabled for now ... 951 */ 952 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES, 953 "82575EB dual-1000baseX Ethernet (SERDES)", 954 WM_T_82575, WMP_F_SERDES }, 955 #endif 956 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER, 957 "82575GB quad-1000baseT Ethernet", 958 WM_T_82575, WMP_F_1000T }, 959 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM, 960 "82575GB quad-1000baseT Ethernet (PM)", 961 WM_T_82575, WMP_F_1000T }, 962 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER, 963 "82576 1000BaseT Ethernet", 964 WM_T_82576, WMP_F_1000T }, 965 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER, 966 "82576 1000BaseX Ethernet", 967 WM_T_82576, WMP_F_1000X }, 968 #if 0 969 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES, 970 "82576 gigabit Ethernet (SERDES)", 971 WM_T_82576, WMP_F_SERDES }, 972 #endif 973 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER, 974 "82576 quad-1000BaseT Ethernet", 975 WM_T_82576, WMP_F_1000T }, 976 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS, 977 "82576 gigabit Ethernet", 978 WM_T_82576, WMP_F_1000T }, 979 #if 0 980 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES, 981 "82576 gigabit Ethernet (SERDES)", 982 WM_T_82576, WMP_F_SERDES }, 983 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD, 984 "82576 quad-gigabit Ethernet (SERDES)", 985 WM_T_82576, WMP_F_SERDES }, 986 #endif 987 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER, 988 "82580 1000BaseT Ethernet", 989 WM_T_82580, WMP_F_1000T }, 990 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER, 991 "82580 1000BaseX Ethernet", 992 WM_T_82580, WMP_F_1000X }, 993 #if 0 994 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES, 995 "82580 1000BaseT Ethernet (SERDES)", 996 WM_T_82580, WMP_F_SERDES }, 997 #endif 998 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII, 999 "82580 gigabit Ethernet (SGMII)", 1000 WM_T_82580, WMP_F_1000T }, 1001 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL, 1002 "82580 dual-1000BaseT Ethernet", 1003 WM_T_82580, WMP_F_1000T }, 1004 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER, 1005 "82580 1000BaseT Ethernet", 1006 WM_T_82580ER, WMP_F_1000T }, 1007 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL, 1008 "82580 dual-1000BaseT Ethernet", 1009 WM_T_82580ER, WMP_F_1000T }, 1010 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER, 1011 "82580 quad-1000BaseX Ethernet", 1012 WM_T_82580, WMP_F_1000X }, 1013 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER, 1014 
"I350 Gigabit Network Connection", 1015 WM_T_I350, WMP_F_1000T }, 1016 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, 1017 "I350 Gigabit Fiber Network Connection", 1018 WM_T_I350, WMP_F_1000X }, 1019 #if 0 1020 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, 1021 "I350 Gigabit Backplane Connection", 1022 WM_T_I350, WMP_F_SERDES }, 1023 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, 1024 "I350 Gigabit Connection", 1025 WM_T_I350, WMP_F_1000T }, 1026 #endif 1027 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, 1028 "I210-T1 Ethernet Server Adapter", 1029 WM_T_I210, WMP_F_1000T }, 1030 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, 1031 "I210 Ethernet (Copper OEM)", 1032 WM_T_I210, WMP_F_1000T }, 1033 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, 1034 "I210 Ethernet (Copper IT)", 1035 WM_T_I210, WMP_F_1000T }, 1036 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, 1037 "I210 Gigabit Ethernet (Fiber)", 1038 WM_T_I210, WMP_F_1000X }, 1039 #if 0 1040 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, 1041 "I210 Gigabit Ethernet (SERDES)", 1042 WM_T_I210, WMP_F_SERDES }, 1043 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, 1044 "I210 Gigabit Ethernet (SGMII)", 1045 WM_T_I210, WMP_F_SERDES }, 1046 #endif 1047 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, 1048 "I211 Ethernet (COPPER)", 1049 WM_T_I211, WMP_F_1000T }, 1050 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, 1051 "I217 V Ethernet Connection", 1052 WM_T_PCH_LPT, WMP_F_1000T }, 1053 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, 1054 "I217 LM Ethernet Connection", 1055 WM_T_PCH_LPT, WMP_F_1000T }, 1056 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, 1057 "I218 V Ethernet Connection", 1058 WM_T_PCH_LPT, WMP_F_1000T }, 1059 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, 1060 "I218 LM Ethernet Connection", 1061 WM_T_PCH_LPT, WMP_F_1000T }, 1062 { 0, 0, 1063 NULL, 1064 0, 0 }, 1065 }; 1066 1067 #ifdef WM_EVENT_COUNTERS 1068 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; 1069 #endif /* WM_EVENT_COUNTERS */ 1070 1071 #if 0 /* Not currently used */ 1072 static inline uint32_t 1073 wm_io_read(struct wm_softc *sc, int reg) 1074 { 1075 1076 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1077 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); 1078 } 1079 #endif 1080 1081 static inline void 1082 wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 1083 { 1084 1085 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1086 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 1087 } 1088 1089 static inline void 1090 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off, 1091 uint32_t data) 1092 { 1093 uint32_t regval; 1094 int i; 1095 1096 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT); 1097 1098 CSR_WRITE(sc, reg, regval); 1099 1100 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) { 1101 delay(5); 1102 if (CSR_READ(sc, reg) & SCTL_CTL_READY) 1103 break; 1104 } 1105 if (i == SCTL_CTL_POLL_TIMEOUT) { 1106 aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n", 1107 device_xname(sc->sc_dev), reg); 1108 } 1109 } 1110 1111 static inline void 1112 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1113 { 1114 wa->wa_low = htole32(v & 0xffffffffU); 1115 if (sizeof(bus_addr_t) == 8) 1116 wa->wa_high = htole32((uint64_t) v >> 32); 1117 else 1118 wa->wa_high = 0; 1119 } 1120 1121 static void 1122 wm_set_spiaddrbits(struct wm_softc *sc) 1123 { 1124 uint32_t reg; 1125 1126 sc->sc_flags |= WM_F_EEPROM_SPI; 1127 reg = CSR_READ(sc, WMREG_EECD); 1128 
static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}
	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem here, because newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}
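	/*
	 * Worked example for the PCI-X MMRBC fix-up above: the command
	 * register encodes the max memory read byte count as
	 * 512 << bytecnt, so bytecnt = 3 means 4096 bytes.  If the
	 * status register says the device only supports maxb = 2
	 * (2048 bytes), the code rewrites the command field, taking
	 * the limit from 4096 down to 2048.
	 */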
	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/* get PHY control from SMBus to PCIe */
	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
	    || (sc->sc_type == WM_T_PCH_LPT))
		wm_smbustopci(sc);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);
1543 */ 1544 switch (sc->sc_type) { 1545 case WM_T_82542_2_0: 1546 case WM_T_82542_2_1: 1547 case WM_T_82543: 1548 case WM_T_82544: 1549 /* Microwire */ 1550 sc->sc_ee_addrbits = 6; 1551 break; 1552 case WM_T_82540: 1553 case WM_T_82545: 1554 case WM_T_82545_3: 1555 case WM_T_82546: 1556 case WM_T_82546_3: 1557 /* Microwire */ 1558 reg = CSR_READ(sc, WMREG_EECD); 1559 if (reg & EECD_EE_SIZE) 1560 sc->sc_ee_addrbits = 8; 1561 else 1562 sc->sc_ee_addrbits = 6; 1563 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE; 1564 break; 1565 case WM_T_82541: 1566 case WM_T_82541_2: 1567 case WM_T_82547: 1568 case WM_T_82547_2: 1569 reg = CSR_READ(sc, WMREG_EECD); 1570 if (reg & EECD_EE_TYPE) { 1571 /* SPI */ 1572 wm_set_spiaddrbits(sc); 1573 } else 1574 /* Microwire */ 1575 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6; 1576 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE; 1577 break; 1578 case WM_T_82571: 1579 case WM_T_82572: 1580 /* SPI */ 1581 wm_set_spiaddrbits(sc); 1582 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE; 1583 break; 1584 case WM_T_82573: 1585 case WM_T_82574: 1586 case WM_T_82583: 1587 if (wm_is_onboard_nvm_eeprom(sc) == 0) 1588 sc->sc_flags |= WM_F_EEPROM_FLASH; 1589 else { 1590 /* SPI */ 1591 wm_set_spiaddrbits(sc); 1592 } 1593 sc->sc_flags |= WM_F_EEPROM_EERDEEWR; 1594 break; 1595 case WM_T_82575: 1596 case WM_T_82576: 1597 case WM_T_82580: 1598 case WM_T_82580ER: 1599 case WM_T_I350: 1600 case WM_T_80003: 1601 /* SPI */ 1602 wm_set_spiaddrbits(sc); 1603 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC; 1604 break; 1605 case WM_T_ICH8: 1606 case WM_T_ICH9: 1607 case WM_T_ICH10: 1608 case WM_T_PCH: 1609 case WM_T_PCH2: 1610 case WM_T_PCH_LPT: 1611 /* FLASH */ 1612 sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC; 1613 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH); 1614 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 1615 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) { 1616 aprint_error_dev(sc->sc_dev, 1617 "can't map FLASH registers\n"); 1618 return; 1619 } 1620 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 1621 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) * 1622 ICH_FLASH_SECTOR_SIZE; 1623 sc->sc_ich8_flash_bank_size = 1624 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1; 1625 sc->sc_ich8_flash_bank_size -= 1626 (reg & ICH_GFPREG_BASE_MASK); 1627 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 1628 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 1629 break; 1630 case WM_T_I210: 1631 case WM_T_I211: 1632 sc->sc_flags |= WM_F_EEPROM_FLASH_HW; 1633 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC; 1634 break; 1635 default: 1636 break; 1637 } 1638 1639 /* 1640 * Defer printing the EEPROM type until after verifying the checksum 1641 * This allows the EEPROM type to be printed correctly in the case 1642 * that no EEPROM is attached. 1643 */ 1644 /* 1645 * Validate the EEPROM checksum. If the checksum fails, flag 1646 * this for later, so we can fail future reads from the EEPROM. 1647 */ 1648 if (wm_validate_eeprom_checksum(sc)) { 1649 /* 1650 * Read twice again because some PCI-e parts fail the 1651 * first check due to the link being in sleep state. 
1652 */ 1653 if (wm_validate_eeprom_checksum(sc)) 1654 sc->sc_flags |= WM_F_EEPROM_INVALID; 1655 } 1656 1657 /* Set device properties (macflags) */ 1658 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); 1659 1660 if (sc->sc_flags & WM_F_EEPROM_INVALID) 1661 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n"); 1662 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) { 1663 aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n"); 1664 } else if (sc->sc_flags & WM_F_EEPROM_FLASH) { 1665 aprint_verbose_dev(sc->sc_dev, "FLASH\n"); 1666 } else { 1667 if (sc->sc_flags & WM_F_EEPROM_SPI) 1668 eetype = "SPI"; 1669 else 1670 eetype = "MicroWire"; 1671 aprint_verbose_dev(sc->sc_dev, 1672 "%u word (%d address bits) %s EEPROM\n", 1673 1U << sc->sc_ee_addrbits, 1674 sc->sc_ee_addrbits, eetype); 1675 } 1676 1677 switch (sc->sc_type) { 1678 case WM_T_82571: 1679 case WM_T_82572: 1680 case WM_T_82573: 1681 case WM_T_82574: 1682 case WM_T_82583: 1683 case WM_T_80003: 1684 case WM_T_ICH8: 1685 case WM_T_ICH9: 1686 case WM_T_ICH10: 1687 case WM_T_PCH: 1688 case WM_T_PCH2: 1689 case WM_T_PCH_LPT: 1690 if (wm_check_mng_mode(sc) != 0) { 1691 printf ("get hw control (1)\n"); 1692 wm_get_hw_control(sc); 1693 } 1694 break; 1695 default: 1696 break; 1697 } 1698 wm_get_wakeup(sc); 1699 /* 1700 * Read the Ethernet address from the EEPROM, if not first found 1701 * in device properties. 1702 */ 1703 ea = prop_dictionary_get(dict, "mac-address"); 1704 if (ea != NULL) { 1705 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 1706 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 1707 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 1708 } else { 1709 if (wm_read_mac_addr(sc, enaddr) != 0) { 1710 aprint_error_dev(sc->sc_dev, 1711 "unable to read Ethernet address\n"); 1712 return; 1713 } 1714 } 1715 1716 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 1717 ether_sprintf(enaddr)); 1718 1719 /* 1720 * Read the config info from the EEPROM, and set up various 1721 * bits in the control registers based on their contents. 1722 */ 1723 pn = prop_dictionary_get(dict, "i82543-cfg1"); 1724 if (pn != NULL) { 1725 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1726 cfg1 = (uint16_t) prop_number_integer_value(pn); 1727 } else { 1728 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) { 1729 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 1730 return; 1731 } 1732 } 1733 1734 pn = prop_dictionary_get(dict, "i82543-cfg2"); 1735 if (pn != NULL) { 1736 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1737 cfg2 = (uint16_t) prop_number_integer_value(pn); 1738 } else { 1739 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) { 1740 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 1741 return; 1742 } 1743 } 1744 1745 /* check for WM_F_WOL */ 1746 switch (sc->sc_type) { 1747 case WM_T_82542_2_0: 1748 case WM_T_82542_2_1: 1749 case WM_T_82543: 1750 /* dummy? */ 1751 eeprom_data = 0; 1752 apme_mask = EEPROM_CFG3_APME; 1753 break; 1754 case WM_T_82544: 1755 apme_mask = EEPROM_CFG2_82544_APM_EN; 1756 eeprom_data = cfg2; 1757 break; 1758 case WM_T_82546: 1759 case WM_T_82546_3: 1760 case WM_T_82571: 1761 case WM_T_82572: 1762 case WM_T_82573: 1763 case WM_T_82574: 1764 case WM_T_82583: 1765 case WM_T_80003: 1766 default: 1767 apme_mask = EEPROM_CFG3_APME; 1768 wm_read_eeprom(sc, (sc->sc_funcid == 1) ? 
EEPROM_OFF_CFG3_PORTB
1769 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1770 		break;
1771 	case WM_T_82575:
1772 	case WM_T_82576:
1773 	case WM_T_82580:
1774 	case WM_T_82580ER:
1775 	case WM_T_I350:
1776 	case WM_T_ICH8:
1777 	case WM_T_ICH9:
1778 	case WM_T_ICH10:
1779 	case WM_T_PCH:
1780 	case WM_T_PCH2:
1781 	case WM_T_PCH_LPT:
1782 		/* XXX The funcid should be checked on some devices */
1783 		apme_mask = WUC_APME;
1784 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1785 		break;
1786 	}
1787 
1788 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1789 	if ((eeprom_data & apme_mask) != 0)
1790 		sc->sc_flags |= WM_F_WOL;
1791 #ifdef WM_DEBUG
1792 	if ((sc->sc_flags & WM_F_WOL) != 0)
1793 		printf("WOL\n");
1794 #endif
1795 
1796 	/*
1797 	 * XXX need special handling for some multiple port cards
1798 	 * to disable a particular port.
1799 	 */
1800 
1801 	if (sc->sc_type >= WM_T_82544) {
1802 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1803 		if (pn != NULL) {
1804 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1805 			swdpin = (uint16_t) prop_number_integer_value(pn);
1806 		} else {
1807 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1808 				aprint_error_dev(sc->sc_dev,
1809 				    "unable to read SWDPIN\n");
1810 				return;
1811 			}
1812 		}
1813 	}
1814 
1815 	if (cfg1 & EEPROM_CFG1_ILOS)
1816 		sc->sc_ctrl |= CTRL_ILOS;
1817 	if (sc->sc_type >= WM_T_82544) {
1818 		sc->sc_ctrl |=
1819 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1820 		    CTRL_SWDPIO_SHIFT;
1821 		sc->sc_ctrl |=
1822 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1823 		    CTRL_SWDPINS_SHIFT;
1824 	} else {
1825 		sc->sc_ctrl |=
1826 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1827 		    CTRL_SWDPIO_SHIFT;
1828 	}
1829 
1830 #if 0
1831 	if (sc->sc_type >= WM_T_82544) {
1832 		if (cfg1 & EEPROM_CFG1_IPS0)
1833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1834 		if (cfg1 & EEPROM_CFG1_IPS1)
1835 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1836 		sc->sc_ctrl_ext |=
1837 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1838 		    CTRL_EXT_SWDPIO_SHIFT;
1839 		sc->sc_ctrl_ext |=
1840 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1841 		    CTRL_EXT_SWDPINS_SHIFT;
1842 	} else {
1843 		sc->sc_ctrl_ext |=
1844 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1845 		    CTRL_EXT_SWDPIO_SHIFT;
1846 	}
1847 #endif
1848 
1849 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1850 #if 0
1851 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1852 #endif
1853 
1854 	/*
1855 	 * Set up some register offsets that are different between
1856 	 * the i82542 and the i82543 and later chips.
1857 	 */
1858 	if (sc->sc_type < WM_T_82543) {
1859 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1860 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1861 	} else {
1862 		sc->sc_rdt_reg = WMREG_RDT;
1863 		sc->sc_tdt_reg = WMREG_TDT;
1864 	}
1865 
1866 	if (sc->sc_type == WM_T_PCH) {
1867 		uint16_t val;
1868 
1869 		/* Save the NVM K1 bit setting */
1870 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1871 
1872 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1873 			sc->sc_nvm_k1_enabled = 1;
1874 		else
1875 			sc->sc_nvm_k1_enabled = 0;
1876 	}
1877 
1878 	/*
1879 	 * Determine if we're TBI, GMII or SGMII mode, and initialize the
1880 	 * media structures accordingly.
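	 *
	 * In outline: ICH/PCH parts and the 8257[34]/82583 always use the
	 * internal GMII PHY; pre-82543 parts, or anything with
	 * STATUS_TBIMODE set, use TBI; the 82575 and newer parts consult
	 * the link-mode field of CTRL_EXT instead.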
1881 */ 1882 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 1883 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH 1884 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT 1885 || sc->sc_type == WM_T_82573 1886 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { 1887 /* STATUS_TBIMODE reserved/reused, can't rely on it */ 1888 wm_gmii_mediainit(sc, wmp->wmp_product); 1889 } else if (sc->sc_type < WM_T_82543 || 1890 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 1891 if (wmp->wmp_flags & WMP_F_1000T) 1892 aprint_error_dev(sc->sc_dev, 1893 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 1894 wm_tbi_mediainit(sc); 1895 } else { 1896 switch (sc->sc_type) { 1897 case WM_T_82575: 1898 case WM_T_82576: 1899 case WM_T_82580: 1900 case WM_T_82580ER: 1901 case WM_T_I350: 1902 case WM_T_I210: 1903 case WM_T_I211: 1904 reg = CSR_READ(sc, WMREG_CTRL_EXT); 1905 switch (reg & CTRL_EXT_LINK_MODE_MASK) { 1906 case CTRL_EXT_LINK_MODE_SGMII: 1907 aprint_verbose_dev(sc->sc_dev, "SGMII\n"); 1908 sc->sc_flags |= WM_F_SGMII; 1909 CSR_WRITE(sc, WMREG_CTRL_EXT, 1910 reg | CTRL_EXT_I2C_ENA); 1911 wm_gmii_mediainit(sc, wmp->wmp_product); 1912 break; 1913 case CTRL_EXT_LINK_MODE_1000KX: 1914 case CTRL_EXT_LINK_MODE_PCIE_SERDES: 1915 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n"); 1916 CSR_WRITE(sc, WMREG_CTRL_EXT, 1917 reg | CTRL_EXT_I2C_ENA); 1918 panic("not supported yet\n"); 1919 break; 1920 case CTRL_EXT_LINK_MODE_GMII: 1921 default: 1922 CSR_WRITE(sc, WMREG_CTRL_EXT, 1923 reg & ~CTRL_EXT_I2C_ENA); 1924 wm_gmii_mediainit(sc, wmp->wmp_product); 1925 break; 1926 } 1927 break; 1928 default: 1929 if (wmp->wmp_flags & WMP_F_1000X) 1930 aprint_error_dev(sc->sc_dev, 1931 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 1932 wm_gmii_mediainit(sc, wmp->wmp_product); 1933 } 1934 } 1935 1936 ifp = &sc->sc_ethercom.ec_if; 1937 xname = device_xname(sc->sc_dev); 1938 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 1939 ifp->if_softc = sc; 1940 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1941 ifp->if_ioctl = wm_ioctl; 1942 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 1943 ifp->if_start = wm_nq_start; 1944 else 1945 ifp->if_start = wm_start; 1946 ifp->if_watchdog = wm_watchdog; 1947 ifp->if_init = wm_init; 1948 ifp->if_stop = wm_stop; 1949 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 1950 IFQ_SET_READY(&ifp->if_snd); 1951 1952 /* Check for jumbo frame */ 1953 switch (sc->sc_type) { 1954 case WM_T_82573: 1955 /* XXX limited to 9234 if ASPM is disabled */ 1956 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3); 1957 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0) 1958 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1959 break; 1960 case WM_T_82571: 1961 case WM_T_82572: 1962 case WM_T_82574: 1963 case WM_T_82575: 1964 case WM_T_82576: 1965 case WM_T_82580: 1966 case WM_T_82580ER: 1967 case WM_T_I350: 1968 case WM_T_I210: 1969 case WM_T_I211: 1970 case WM_T_80003: 1971 case WM_T_ICH9: 1972 case WM_T_ICH10: 1973 case WM_T_PCH2: /* PCH2 supports 9K frame size */ 1974 case WM_T_PCH_LPT: 1975 /* XXX limited to 9234 */ 1976 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1977 break; 1978 case WM_T_PCH: 1979 /* XXX limited to 4096 */ 1980 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1981 break; 1982 case WM_T_82542_2_0: 1983 case WM_T_82542_2_1: 1984 case WM_T_82583: 1985 case WM_T_ICH8: 1986 /* No support for jumbo frame */ 1987 break; 1988 default: 1989 /* ETHER_MAX_LEN_JUMBO */ 1990 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1991 break; 
1992 	}
1993 
1994 	/*
1995 	 * If we're an i82543 or greater, we can support VLANs.
1996 	 */
1997 	if (sc->sc_type >= WM_T_82543)
1998 		sc->sc_ethercom.ec_capabilities |=
1999 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2000 
2001 	/*
2002 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
2003 	 * on i82543 and later.
2004 	 */
2005 	if (sc->sc_type >= WM_T_82543) {
2006 		ifp->if_capabilities |=
2007 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2008 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2009 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2010 		    IFCAP_CSUM_TCPv6_Tx |
2011 		    IFCAP_CSUM_UDPv6_Tx;
2012 	}
2013 
2014 	/*
2015 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2016 	 *
2017 	 *	82541GI (8086:1076) ... no
2018 	 *	82572EI (8086:10b9) ... yes
2019 	 */
2020 	if (sc->sc_type >= WM_T_82571) {
2021 		ifp->if_capabilities |=
2022 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2023 	}
2024 
2025 	/*
2026 	 * If we're an i82544 or greater (except i82547), we can do
2027 	 * TCP segmentation offload.
2028 	 */
2029 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2030 		ifp->if_capabilities |= IFCAP_TSOv4;
2031 	}
2032 
2033 	if (sc->sc_type >= WM_T_82571) {
2034 		ifp->if_capabilities |= IFCAP_TSOv6;
2035 	}
2036 
2037 	/*
2038 	 * Attach the interface.
2039 	 */
2040 	if_attach(ifp);
2041 	ether_ifattach(ifp, enaddr);
2042 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2043 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2044 
2045 #ifdef WM_EVENT_COUNTERS
2046 	/* Attach event counters. */
2047 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2048 	    NULL, xname, "txsstall");
2049 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2050 	    NULL, xname, "txdstall");
2051 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2052 	    NULL, xname, "txfifo_stall");
2053 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2054 	    NULL, xname, "txdw");
2055 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2056 	    NULL, xname, "txqe");
2057 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2058 	    NULL, xname, "rxintr");
2059 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2060 	    NULL, xname, "linkintr");
2061 
2062 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2063 	    NULL, xname, "rxipsum");
2064 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2065 	    NULL, xname, "rxtusum");
2066 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2067 	    NULL, xname, "txipsum");
2068 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2069 	    NULL, xname, "txtusum");
2070 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2071 	    NULL, xname, "txtusum6");
2072 
2073 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2074 	    NULL, xname, "txtso");
2075 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2076 	    NULL, xname, "txtso6");
2077 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2078 	    NULL, xname, "txtsopain");
2079 
2080 	for (i = 0; i < WM_NTXSEGS; i++) {
2081 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2082 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2083 		    NULL, xname, wm_txseg_evcnt_names[i]);
2084 	}
2085 
2086 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2087 	    NULL, xname, "txdrop");
2088 
2089 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2090 	    NULL, xname, "tu");
2091 
2092 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2093 	    NULL, xname, "tx_xoff");
2094 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2095 	    NULL, xname, "tx_xon");
2096 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff,
EVCNT_TYPE_MISC, 2097 NULL, xname, "rx_xoff"); 2098 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 2099 NULL, xname, "rx_xon"); 2100 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 2101 NULL, xname, "rx_macctl"); 2102 #endif /* WM_EVENT_COUNTERS */ 2103 2104 if (pmf_device_register(self, wm_suspend, wm_resume)) 2105 pmf_class_network_register(self, ifp); 2106 else 2107 aprint_error_dev(self, "couldn't establish power handler\n"); 2108 2109 return; 2110 2111 /* 2112 * Free any resources we've allocated during the failed attach 2113 * attempt. Do this in reverse order and fall through. 2114 */ 2115 fail_5: 2116 for (i = 0; i < WM_NRXDESC; i++) { 2117 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 2118 bus_dmamap_destroy(sc->sc_dmat, 2119 sc->sc_rxsoft[i].rxs_dmamap); 2120 } 2121 fail_4: 2122 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 2123 if (sc->sc_txsoft[i].txs_dmamap != NULL) 2124 bus_dmamap_destroy(sc->sc_dmat, 2125 sc->sc_txsoft[i].txs_dmamap); 2126 } 2127 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 2128 fail_3: 2129 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 2130 fail_2: 2131 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 2132 sc->sc_cd_size); 2133 fail_1: 2134 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg); 2135 fail_0: 2136 return; 2137 } 2138 2139 static int 2140 wm_detach(device_t self, int flags __unused) 2141 { 2142 struct wm_softc *sc = device_private(self); 2143 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2144 int i, s; 2145 2146 s = splnet(); 2147 /* Stop the interface. Callouts are stopped in it. */ 2148 wm_stop(ifp, 1); 2149 splx(s); 2150 2151 pmf_device_deregister(self); 2152 2153 /* Tell the firmware about the release */ 2154 wm_release_manageability(sc); 2155 wm_release_hw_control(sc); 2156 2157 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 2158 2159 /* Delete all remaining media. */ 2160 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 2161 2162 ether_ifdetach(ifp); 2163 if_detach(ifp); 2164 2165 2166 /* Unload RX dmamaps and free mbufs */ 2167 wm_rxdrain(sc); 2168 2169 /* Free dmamap. It's the same as the end of the wm_attach() function */ 2170 for (i = 0; i < WM_NRXDESC; i++) { 2171 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 2172 bus_dmamap_destroy(sc->sc_dmat, 2173 sc->sc_rxsoft[i].rxs_dmamap); 2174 } 2175 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 2176 if (sc->sc_txsoft[i].txs_dmamap != NULL) 2177 bus_dmamap_destroy(sc->sc_dmat, 2178 sc->sc_txsoft[i].txs_dmamap); 2179 } 2180 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 2181 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 2182 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 2183 sc->sc_cd_size); 2184 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg); 2185 2186 /* Disestablish the interrupt handler */ 2187 if (sc->sc_ih != NULL) { 2188 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 2189 sc->sc_ih = NULL; 2190 } 2191 2192 /* Unmap the registers */ 2193 if (sc->sc_ss) { 2194 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); 2195 sc->sc_ss = 0; 2196 } 2197 2198 if (sc->sc_ios) { 2199 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 2200 sc->sc_ios = 0; 2201 } 2202 2203 return 0; 2204 } 2205 2206 /* 2207 * wm_tx_offload: 2208 * 2209 * Set up TCP/IP checksumming parameters for the 2210 * specified packet. 
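 *
 *	A single checksum context descriptor is queued ahead of the data
 *	descriptors; its IPCSS/IPCSO/IPCSE and TUCSS/TUCSO/TUCSE fields
 *	tell the MAC where the IP and TCP/UDP checksum fields sit in
 *	the frame.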
2211 */ 2212 static int 2213 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, 2214 uint8_t *fieldsp) 2215 { 2216 struct mbuf *m0 = txs->txs_mbuf; 2217 struct livengood_tcpip_ctxdesc *t; 2218 uint32_t ipcs, tucs, cmd, cmdlen, seg; 2219 uint32_t ipcse; 2220 struct ether_header *eh; 2221 int offset, iphl; 2222 uint8_t fields; 2223 2224 /* 2225 * XXX It would be nice if the mbuf pkthdr had offset 2226 * fields for the protocol headers. 2227 */ 2228 2229 eh = mtod(m0, struct ether_header *); 2230 switch (htons(eh->ether_type)) { 2231 case ETHERTYPE_IP: 2232 case ETHERTYPE_IPV6: 2233 offset = ETHER_HDR_LEN; 2234 break; 2235 2236 case ETHERTYPE_VLAN: 2237 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2238 break; 2239 2240 default: 2241 /* 2242 * Don't support this protocol or encapsulation. 2243 */ 2244 *fieldsp = 0; 2245 *cmdp = 0; 2246 return 0; 2247 } 2248 2249 if ((m0->m_pkthdr.csum_flags & 2250 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { 2251 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 2252 } else { 2253 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 2254 } 2255 ipcse = offset + iphl - 1; 2256 2257 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 2258 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 2259 seg = 0; 2260 fields = 0; 2261 2262 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 2263 int hlen = offset + iphl; 2264 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 2265 2266 if (__predict_false(m0->m_len < 2267 (hlen + sizeof(struct tcphdr)))) { 2268 /* 2269 * TCP/IP headers are not in the first mbuf; we need 2270 * to do this the slow and painful way. Let's just 2271 * hope this doesn't happen very often. 2272 */ 2273 struct tcphdr th; 2274 2275 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 2276 2277 m_copydata(m0, hlen, sizeof(th), &th); 2278 if (v4) { 2279 struct ip ip; 2280 2281 m_copydata(m0, offset, sizeof(ip), &ip); 2282 ip.ip_len = 0; 2283 m_copyback(m0, 2284 offset + offsetof(struct ip, ip_len), 2285 sizeof(ip.ip_len), &ip.ip_len); 2286 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 2287 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 2288 } else { 2289 struct ip6_hdr ip6; 2290 2291 m_copydata(m0, offset, sizeof(ip6), &ip6); 2292 ip6.ip6_plen = 0; 2293 m_copyback(m0, 2294 offset + offsetof(struct ip6_hdr, ip6_plen), 2295 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 2296 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 2297 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 2298 } 2299 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 2300 sizeof(th.th_sum), &th.th_sum); 2301 2302 hlen += th.th_off << 2; 2303 } else { 2304 /* 2305 * TCP/IP headers are in the first mbuf; we can do 2306 * this the easy way. 
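			 * (Either way, for TSO the IP length field is
			 * zeroed and th_sum is seeded with a pseudo-header
			 * checksum that omits the length; the chip fills
			 * in per-segment lengths and checksums when it
			 * splits the payload.)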
2307 */ 2308 struct tcphdr *th; 2309 2310 if (v4) { 2311 struct ip *ip = 2312 (void *)(mtod(m0, char *) + offset); 2313 th = (void *)(mtod(m0, char *) + hlen); 2314 2315 ip->ip_len = 0; 2316 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 2317 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2318 } else { 2319 struct ip6_hdr *ip6 = 2320 (void *)(mtod(m0, char *) + offset); 2321 th = (void *)(mtod(m0, char *) + hlen); 2322 2323 ip6->ip6_plen = 0; 2324 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 2325 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 2326 } 2327 hlen += th->th_off << 2; 2328 } 2329 2330 if (v4) { 2331 WM_EVCNT_INCR(&sc->sc_ev_txtso); 2332 cmdlen |= WTX_TCPIP_CMD_IP; 2333 } else { 2334 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 2335 ipcse = 0; 2336 } 2337 cmd |= WTX_TCPIP_CMD_TSE; 2338 cmdlen |= WTX_TCPIP_CMD_TSE | 2339 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 2340 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 2341 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 2342 } 2343 2344 /* 2345 * NOTE: Even if we're not using the IP or TCP/UDP checksum 2346 * offload feature, if we load the context descriptor, we 2347 * MUST provide valid values for IPCSS and TUCSS fields. 2348 */ 2349 2350 ipcs = WTX_TCPIP_IPCSS(offset) | 2351 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 2352 WTX_TCPIP_IPCSE(ipcse); 2353 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 2354 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 2355 fields |= WTX_IXSM; 2356 } 2357 2358 offset += iphl; 2359 2360 if (m0->m_pkthdr.csum_flags & 2361 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 2362 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 2363 fields |= WTX_TXSM; 2364 tucs = WTX_TCPIP_TUCSS(offset) | 2365 WTX_TCPIP_TUCSO(offset + 2366 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 2367 WTX_TCPIP_TUCSE(0) /* rest of packet */; 2368 } else if ((m0->m_pkthdr.csum_flags & 2369 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 2370 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 2371 fields |= WTX_TXSM; 2372 tucs = WTX_TCPIP_TUCSS(offset) | 2373 WTX_TCPIP_TUCSO(offset + 2374 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 2375 WTX_TCPIP_TUCSE(0) /* rest of packet */; 2376 } else { 2377 /* Just initialize it to a valid TCP context. */ 2378 tucs = WTX_TCPIP_TUCSS(offset) | 2379 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 2380 WTX_TCPIP_TUCSE(0) /* rest of packet */; 2381 } 2382 2383 /* Fill in the context descriptor. */ 2384 t = (struct livengood_tcpip_ctxdesc *) 2385 &sc->sc_txdescs[sc->sc_txnext]; 2386 t->tcpip_ipcs = htole32(ipcs); 2387 t->tcpip_tucs = htole32(tucs); 2388 t->tcpip_cmdlen = htole32(cmdlen); 2389 t->tcpip_seg = htole32(seg); 2390 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 2391 2392 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 2393 txs->txs_ndesc++; 2394 2395 *cmdp = cmd; 2396 *fieldsp = fields; 2397 2398 return 0; 2399 } 2400 2401 static void 2402 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 2403 { 2404 struct mbuf *m; 2405 int i; 2406 2407 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 2408 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 2409 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 2410 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 2411 m->m_data, m->m_len, m->m_flags); 2412 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 2413 i, i == 1 ? "" : "s"); 2414 } 2415 2416 /* 2417 * wm_82547_txfifo_stall: 2418 * 2419 * Callout used to wait for the 82547 Tx FIFO to drain, 2420 * reset the FIFO pointers, and restart packet transmission. 
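 *
 *	The callout re-arms itself once per tick until TDT == TDH and
 *	the internal FIFO head/tail registers agree, i.e. the FIFO has
 *	fully drained; only then are the pointers reset and wm_start()
 *	kicked to resume transmission.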
2421  */
2422 static void
2423 wm_82547_txfifo_stall(void *arg)
2424 {
2425 	struct wm_softc *sc = arg;
2426 	int s;
2427 
2428 	s = splnet();
2429 
2430 	if (sc->sc_txfifo_stall) {
2431 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2432 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2433 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2434 			/*
2435 			 * Packets have drained. Stop transmitter, reset
2436 			 * FIFO pointers, restart transmitter, and kick
2437 			 * the packet queue.
2438 			 */
2439 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2440 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2441 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2442 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2443 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2444 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2445 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2446 			CSR_WRITE_FLUSH(sc);
2447 
2448 			sc->sc_txfifo_head = 0;
2449 			sc->sc_txfifo_stall = 0;
2450 			wm_start(&sc->sc_ethercom.ec_if);
2451 		} else {
2452 			/*
2453 			 * Still waiting for packets to drain; try again in
2454 			 * another tick.
2455 			 */
2456 			callout_schedule(&sc->sc_txfifo_ch, 1);
2457 		}
2458 	}
2459 
2460 	splx(s);
2461 }
2462 
2463 static void
2464 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2465 {
2466 	uint32_t reg;
2467 
2468 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2469 
2470 	if (on != 0)
2471 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2472 	else
2473 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2474 
2475 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2476 }
2477 
2478 /*
2479  * wm_82547_txfifo_bugchk:
2480  *
2481  *	Check for bug condition in the 82547 Tx FIFO. We need to
2482  *	prevent enqueueing a packet that would wrap around the end
2483  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2484  *
2485  *	We do this by checking the amount of space before the end
2486  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
2487  *	the Tx FIFO, wait for all remaining packets to drain, reset
2488  *	the internal FIFO pointers to the beginning, and restart
2489  *	transmission on the interface.
2490  */
2491 #define	WM_FIFO_HDR		0x10
2492 #define	WM_82547_PAD_LEN	0x3e0
2493 static int
2494 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2495 {
2496 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2497 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2498 
2499 	/* Just return if already stalled. */
2500 	if (sc->sc_txfifo_stall)
2501 		return 1;
2502 
2503 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2504 		/* Stall only occurs in half-duplex mode. */
2505 		goto send_packet;
2506 	}
2507 
2508 	if (len >= WM_82547_PAD_LEN + space) {
2509 		sc->sc_txfifo_stall = 1;
2510 		callout_schedule(&sc->sc_txfifo_ch, 1);
2511 		return 1;
2512 	}
2513 
2514  send_packet:
2515 	sc->sc_txfifo_head += len;
2516 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2517 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2518 
2519 	return 0;
2520 }
2521 
2522 /*
2523  * wm_start:		[ifnet interface function]
2524  *
2525  *	Start packet transmission on the interface.
2526  */
2527 static void
2528 wm_start(struct ifnet *ifp)
2529 {
2530 	struct wm_softc *sc = ifp->if_softc;
2531 	struct mbuf *m0;
2532 	struct m_tag *mtag;
2533 	struct wm_txsoft *txs;
2534 	bus_dmamap_t dmamap;
2535 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2536 	bus_addr_t curaddr;
2537 	bus_size_t seglen, curlen;
2538 	uint32_t cksumcmd;
2539 	uint8_t cksumfields;
2540 
2541 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2542 		return;
2543 
2544 	/*
2545 	 * Remember the previous number of free descriptors.
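	 * If the loop below consumes any, at least one packet was handed
	 * to the chip, and the watchdog timer is armed before returning.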
2546 	 */
2547 	ofree = sc->sc_txfree;
2548 
2549 	/*
2550 	 * Loop through the send queue, setting up transmit descriptors
2551 	 * until we drain the queue, or use up all available transmit
2552 	 * descriptors.
2553 	 */
2554 	for (;;) {
2555 		/* Grab a packet off the queue. */
2556 		IFQ_POLL(&ifp->if_snd, m0);
2557 		if (m0 == NULL)
2558 			break;
2559 
2560 		DPRINTF(WM_DEBUG_TX,
2561 		    ("%s: TX: have packet to transmit: %p\n",
2562 		    device_xname(sc->sc_dev), m0));
2563 
2564 		/* Get a work queue entry. */
2565 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2566 			wm_txintr(sc);
2567 			if (sc->sc_txsfree == 0) {
2568 				DPRINTF(WM_DEBUG_TX,
2569 				    ("%s: TX: no free job descriptors\n",
2570 					device_xname(sc->sc_dev)));
2571 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2572 				break;
2573 			}
2574 		}
2575 
2576 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2577 		dmamap = txs->txs_dmamap;
2578 
2579 		use_tso = (m0->m_pkthdr.csum_flags &
2580 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2581 
2582 		/*
2583 		 * So says the Linux driver:
2584 		 * The controller does a simple calculation to make sure
2585 		 * there is enough room in the FIFO before initiating the
2586 		 * DMA for each buffer. The calc is:
2587 		 *	4 = ceil(buffer len / MSS)
2588 		 * To make sure we don't overrun the FIFO, adjust the max
2589 		 * buffer len if the MSS drops.
2590 		 */
2591 		dmamap->dm_maxsegsz =
2592 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2593 		    ? m0->m_pkthdr.segsz << 2
2594 		    : WTX_MAX_LEN;
2595 
2596 		/*
2597 		 * Load the DMA map. If this fails, the packet either
2598 		 * didn't fit in the allotted number of segments, or we
2599 		 * were short on resources. For the too-many-segments
2600 		 * case, we simply report an error and drop the packet,
2601 		 * since we can't sanely copy a jumbo packet to a single
2602 		 * buffer.
2603 		 */
2604 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2605 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2606 		if (error) {
2607 			if (error == EFBIG) {
2608 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2609 				log(LOG_ERR, "%s: Tx packet consumes too many "
2610 				    "DMA segments, dropping...\n",
2611 				    device_xname(sc->sc_dev));
2612 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2613 				wm_dump_mbuf_chain(sc, m0);
2614 				m_freem(m0);
2615 				continue;
2616 			}
2617 			/*
2618 			 * Short on resources, just stop for now.
2619 			 */
2620 			DPRINTF(WM_DEBUG_TX,
2621 			    ("%s: TX: dmamap load failed: %d\n",
2622 			    device_xname(sc->sc_dev), error));
2623 			break;
2624 		}
2625 
2626 		segs_needed = dmamap->dm_nsegs;
2627 		if (use_tso) {
2628 			/* For sentinel descriptor; see below. */
2629 			segs_needed++;
2630 		}
2631 
2632 		/*
2633 		 * Ensure we have enough descriptors free to describe
2634 		 * the packet. Note, we always reserve one descriptor
2635 		 * at the end of the ring due to the semantics of the
2636 		 * TDT register, plus one more in the event we need
2637 		 * to load offload context.
2638 		 */
2639 		if (segs_needed > sc->sc_txfree - 2) {
2640 			/*
2641 			 * Not enough free descriptors to transmit this
2642 			 * packet. We haven't committed anything yet,
2643 			 * so just unload the DMA map, put the packet
2644 			 * back on the queue, and punt. Notify the upper
2645 			 * layer that there are no more slots left.
2646 			 */
2647 			DPRINTF(WM_DEBUG_TX,
2648 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2649 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2650 			    segs_needed, sc->sc_txfree - 1));
2651 			ifp->if_flags |= IFF_OACTIVE;
2652 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2653 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2654 			break;
2655 		}
2656 
2657 		/*
2658 		 * Check for 82547 Tx FIFO bug. 
We need to do this 2659 * once we know we can transmit the packet, since we 2660 * do some internal FIFO space accounting here. 2661 */ 2662 if (sc->sc_type == WM_T_82547 && 2663 wm_82547_txfifo_bugchk(sc, m0)) { 2664 DPRINTF(WM_DEBUG_TX, 2665 ("%s: TX: 82547 Tx FIFO bug detected\n", 2666 device_xname(sc->sc_dev))); 2667 ifp->if_flags |= IFF_OACTIVE; 2668 bus_dmamap_unload(sc->sc_dmat, dmamap); 2669 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall); 2670 break; 2671 } 2672 2673 IFQ_DEQUEUE(&ifp->if_snd, m0); 2674 2675 /* 2676 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 2677 */ 2678 2679 DPRINTF(WM_DEBUG_TX, 2680 ("%s: TX: packet has %d (%d) DMA segments\n", 2681 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 2682 2683 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 2684 2685 /* 2686 * Store a pointer to the packet so that we can free it 2687 * later. 2688 * 2689 * Initially, we consider the number of descriptors the 2690 * packet uses the number of DMA segments. This may be 2691 * incremented by 1 if we do checksum offload (a descriptor 2692 * is used to set the checksum context). 2693 */ 2694 txs->txs_mbuf = m0; 2695 txs->txs_firstdesc = sc->sc_txnext; 2696 txs->txs_ndesc = segs_needed; 2697 2698 /* Set up offload parameters for this packet. */ 2699 if (m0->m_pkthdr.csum_flags & 2700 (M_CSUM_TSOv4|M_CSUM_TSOv6| 2701 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 2702 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 2703 if (wm_tx_offload(sc, txs, &cksumcmd, 2704 &cksumfields) != 0) { 2705 /* Error message already displayed. */ 2706 bus_dmamap_unload(sc->sc_dmat, dmamap); 2707 continue; 2708 } 2709 } else { 2710 cksumcmd = 0; 2711 cksumfields = 0; 2712 } 2713 2714 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 2715 2716 /* Sync the DMA map. */ 2717 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 2718 BUS_DMASYNC_PREWRITE); 2719 2720 /* 2721 * Initialize the transmit descriptor. 2722 */ 2723 for (nexttx = sc->sc_txnext, seg = 0; 2724 seg < dmamap->dm_nsegs; seg++) { 2725 for (seglen = dmamap->dm_segs[seg].ds_len, 2726 curaddr = dmamap->dm_segs[seg].ds_addr; 2727 seglen != 0; 2728 curaddr += curlen, seglen -= curlen, 2729 nexttx = WM_NEXTTX(sc, nexttx)) { 2730 curlen = seglen; 2731 2732 /* 2733 * So says the Linux driver: 2734 * Work around for premature descriptor 2735 * write-backs in TSO mode. Append a 2736 * 4-byte sentinel descriptor. 2737 */ 2738 if (use_tso && 2739 seg == dmamap->dm_nsegs - 1 && 2740 curlen > 8) 2741 curlen -= 4; 2742 2743 wm_set_dma_addr( 2744 &sc->sc_txdescs[nexttx].wtx_addr, 2745 curaddr); 2746 sc->sc_txdescs[nexttx].wtx_cmdlen = 2747 htole32(cksumcmd | curlen); 2748 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2749 0; 2750 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2751 cksumfields; 2752 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2753 lasttx = nexttx; 2754 2755 DPRINTF(WM_DEBUG_TX, 2756 ("%s: TX: desc %d: low %#" PRIx64 ", " 2757 "len %#04zx\n", 2758 device_xname(sc->sc_dev), nexttx, 2759 (uint64_t)curaddr, curlen)); 2760 } 2761 } 2762 2763 KASSERT(lasttx != -1); 2764 2765 /* 2766 * Set up the command byte on the last descriptor of 2767 * the packet. If we're in the interrupt delay window, 2768 * delay the interrupt. 2769 */ 2770 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2771 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2772 2773 /* 2774 * If VLANs are enabled and the packet has a VLAN tag, set 2775 * up the descriptor to encapsulate the packet for us. 2776 * 2777 * This is only valid on the last descriptor of the packet. 
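		 * (With WTX_CMD_VLE set there, the MAC inserts an 802.1Q
		 * tag taken from the wtxu_vlan field while transmitting
		 * the frame.)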
2778 */ 2779 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2780 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2781 htole32(WTX_CMD_VLE); 2782 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2783 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2784 } 2785 2786 txs->txs_lastdesc = lasttx; 2787 2788 DPRINTF(WM_DEBUG_TX, 2789 ("%s: TX: desc %d: cmdlen 0x%08x\n", 2790 device_xname(sc->sc_dev), 2791 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2792 2793 /* Sync the descriptors we're using. */ 2794 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2795 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2796 2797 /* Give the packet to the chip. */ 2798 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2799 2800 DPRINTF(WM_DEBUG_TX, 2801 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 2802 2803 DPRINTF(WM_DEBUG_TX, 2804 ("%s: TX: finished transmitting packet, job %d\n", 2805 device_xname(sc->sc_dev), sc->sc_txsnext)); 2806 2807 /* Advance the tx pointer. */ 2808 sc->sc_txfree -= txs->txs_ndesc; 2809 sc->sc_txnext = nexttx; 2810 2811 sc->sc_txsfree--; 2812 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2813 2814 /* Pass the packet to any BPF listeners. */ 2815 bpf_mtap(ifp, m0); 2816 } 2817 2818 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2819 /* No more slots; notify upper layer. */ 2820 ifp->if_flags |= IFF_OACTIVE; 2821 } 2822 2823 if (sc->sc_txfree != ofree) { 2824 /* Set a watchdog timer in case the chip flakes out. */ 2825 ifp->if_timer = 5; 2826 } 2827 } 2828 2829 /* 2830 * wm_nq_tx_offload: 2831 * 2832 * Set up TCP/IP checksumming parameters for the 2833 * specified packet, for NEWQUEUE devices 2834 */ 2835 static int 2836 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, 2837 uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) 2838 { 2839 struct mbuf *m0 = txs->txs_mbuf; 2840 struct m_tag *mtag; 2841 uint32_t vl_len, mssidx, cmdc; 2842 struct ether_header *eh; 2843 int offset, iphl; 2844 2845 /* 2846 * XXX It would be nice if the mbuf pkthdr had offset 2847 * fields for the protocol headers. 2848 */ 2849 *cmdlenp = 0; 2850 *fieldsp = 0; 2851 2852 eh = mtod(m0, struct ether_header *); 2853 switch (htons(eh->ether_type)) { 2854 case ETHERTYPE_IP: 2855 case ETHERTYPE_IPV6: 2856 offset = ETHER_HDR_LEN; 2857 break; 2858 2859 case ETHERTYPE_VLAN: 2860 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2861 break; 2862 2863 default: 2864 /* 2865 * Don't support this protocol or encapsulation. 
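		 * Leave *do_csum false; the caller then falls back to a
		 * plain legacy descriptor with no offload context.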
2866 */ 2867 *do_csum = false; 2868 return 0; 2869 } 2870 *do_csum = true; 2871 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; 2872 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; 2873 2874 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); 2875 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); 2876 2877 if ((m0->m_pkthdr.csum_flags & 2878 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) { 2879 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 2880 } else { 2881 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 2882 } 2883 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); 2884 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); 2885 2886 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2887 vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK) 2888 << NQTXC_VLLEN_VLAN_SHIFT); 2889 *cmdlenp |= NQTX_CMD_VLE; 2890 } 2891 2892 mssidx = 0; 2893 2894 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 2895 int hlen = offset + iphl; 2896 int tcp_hlen; 2897 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 2898 2899 if (__predict_false(m0->m_len < 2900 (hlen + sizeof(struct tcphdr)))) { 2901 /* 2902 * TCP/IP headers are not in the first mbuf; we need 2903 * to do this the slow and painful way. Let's just 2904 * hope this doesn't happen very often. 2905 */ 2906 struct tcphdr th; 2907 2908 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 2909 2910 m_copydata(m0, hlen, sizeof(th), &th); 2911 if (v4) { 2912 struct ip ip; 2913 2914 m_copydata(m0, offset, sizeof(ip), &ip); 2915 ip.ip_len = 0; 2916 m_copyback(m0, 2917 offset + offsetof(struct ip, ip_len), 2918 sizeof(ip.ip_len), &ip.ip_len); 2919 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 2920 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 2921 } else { 2922 struct ip6_hdr ip6; 2923 2924 m_copydata(m0, offset, sizeof(ip6), &ip6); 2925 ip6.ip6_plen = 0; 2926 m_copyback(m0, 2927 offset + offsetof(struct ip6_hdr, ip6_plen), 2928 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 2929 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 2930 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 2931 } 2932 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 2933 sizeof(th.th_sum), &th.th_sum); 2934 2935 tcp_hlen = th.th_off << 2; 2936 } else { 2937 /* 2938 * TCP/IP headers are in the first mbuf; we can do 2939 * this the easy way. 
2940 */ 2941 struct tcphdr *th; 2942 2943 if (v4) { 2944 struct ip *ip = 2945 (void *)(mtod(m0, char *) + offset); 2946 th = (void *)(mtod(m0, char *) + hlen); 2947 2948 ip->ip_len = 0; 2949 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 2950 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2951 } else { 2952 struct ip6_hdr *ip6 = 2953 (void *)(mtod(m0, char *) + offset); 2954 th = (void *)(mtod(m0, char *) + hlen); 2955 2956 ip6->ip6_plen = 0; 2957 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 2958 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 2959 } 2960 tcp_hlen = th->th_off << 2; 2961 } 2962 hlen += tcp_hlen; 2963 *cmdlenp |= NQTX_CMD_TSE; 2964 2965 if (v4) { 2966 WM_EVCNT_INCR(&sc->sc_ev_txtso); 2967 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; 2968 } else { 2969 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 2970 *fieldsp |= NQTXD_FIELDS_TUXSM; 2971 } 2972 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); 2973 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 2974 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); 2975 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); 2976 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); 2977 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); 2978 } else { 2979 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); 2980 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 2981 } 2982 2983 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 2984 *fieldsp |= NQTXD_FIELDS_IXSM; 2985 cmdc |= NQTXC_CMD_IP4; 2986 } 2987 2988 if (m0->m_pkthdr.csum_flags & 2989 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 2990 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 2991 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 2992 cmdc |= NQTXC_CMD_TCP; 2993 } else { 2994 cmdc |= NQTXC_CMD_UDP; 2995 } 2996 cmdc |= NQTXC_CMD_IP4; 2997 *fieldsp |= NQTXD_FIELDS_TUXSM; 2998 } 2999 if (m0->m_pkthdr.csum_flags & 3000 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 3001 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 3002 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 3003 cmdc |= NQTXC_CMD_TCP; 3004 } else { 3005 cmdc |= NQTXC_CMD_UDP; 3006 } 3007 cmdc |= NQTXC_CMD_IP6; 3008 *fieldsp |= NQTXD_FIELDS_TUXSM; 3009 } 3010 3011 /* Fill in the context descriptor. 
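	 * MACLEN/IPLEN and any VLAN tag were packed into vl_len above,
	 * L4LEN/MSS into mssidx, and cmdc selects IPv4/IPv6 and TCP/UDP.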
	 */
3012 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3013 	    htole32(vl_len);
3014 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3015 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3016 	    htole32(cmdc);
3017 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3018 	    htole32(mssidx);
3019 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3020 	DPRINTF(WM_DEBUG_TX,
3021 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3022 	    sc->sc_txnext, 0, vl_len));
3023 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3024 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3025 	txs->txs_ndesc++;
3026 	return 0;
3027 }
3028 
3029 /*
3030  * wm_nq_start:		[ifnet interface function]
3031  *
3032  *	Start packet transmission on the interface for NEWQUEUE devices.
3033  */
3034 static void
3035 wm_nq_start(struct ifnet *ifp)
3036 {
3037 	struct wm_softc *sc = ifp->if_softc;
3038 	struct mbuf *m0;
3039 	struct m_tag *mtag;
3040 	struct wm_txsoft *txs;
3041 	bus_dmamap_t dmamap;
3042 	int error, nexttx, lasttx = -1, seg, segs_needed;
3043 	bool do_csum, sent;
3044 
3045 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3046 		return;
3047 
3048 	sent = false;
3049 
3050 	/*
3051 	 * Loop through the send queue, setting up transmit descriptors
3052 	 * until we drain the queue, or use up all available transmit
3053 	 * descriptors.
3054 	 */
3055 	for (;;) {
3056 		/* Grab a packet off the queue. */
3057 		IFQ_POLL(&ifp->if_snd, m0);
3058 		if (m0 == NULL)
3059 			break;
3060 
3061 		DPRINTF(WM_DEBUG_TX,
3062 		    ("%s: TX: have packet to transmit: %p\n",
3063 		    device_xname(sc->sc_dev), m0));
3064 
3065 		/* Get a work queue entry. */
3066 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3067 			wm_txintr(sc);
3068 			if (sc->sc_txsfree == 0) {
3069 				DPRINTF(WM_DEBUG_TX,
3070 				    ("%s: TX: no free job descriptors\n",
3071 					device_xname(sc->sc_dev)));
3072 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3073 				break;
3074 			}
3075 		}
3076 
3077 		txs = &sc->sc_txsoft[sc->sc_txsnext];
3078 		dmamap = txs->txs_dmamap;
3079 
3080 		/*
3081 		 * Load the DMA map. If this fails, the packet either
3082 		 * didn't fit in the allotted number of segments, or we
3083 		 * were short on resources. For the too-many-segments
3084 		 * case, we simply report an error and drop the packet,
3085 		 * since we can't sanely copy a jumbo packet to a single
3086 		 * buffer.
3087 		 */
3088 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3089 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3090 		if (error) {
3091 			if (error == EFBIG) {
3092 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3093 				log(LOG_ERR, "%s: Tx packet consumes too many "
3094 				    "DMA segments, dropping...\n",
3095 				    device_xname(sc->sc_dev));
3096 				IFQ_DEQUEUE(&ifp->if_snd, m0);
3097 				wm_dump_mbuf_chain(sc, m0);
3098 				m_freem(m0);
3099 				continue;
3100 			}
3101 			/*
3102 			 * Short on resources, just stop for now.
3103 			 */
3104 			DPRINTF(WM_DEBUG_TX,
3105 			    ("%s: TX: dmamap load failed: %d\n",
3106 			    device_xname(sc->sc_dev), error));
3107 			break;
3108 		}
3109 
3110 		segs_needed = dmamap->dm_nsegs;
3111 
3112 		/*
3113 		 * Ensure we have enough descriptors free to describe
3114 		 * the packet. Note, we always reserve one descriptor
3115 		 * at the end of the ring due to the semantics of the
3116 		 * TDT register, plus one more in the event we need
3117 		 * to load offload context.
3118 		 */
3119 		if (segs_needed > sc->sc_txfree - 2) {
3120 			/*
3121 			 * Not enough free descriptors to transmit this
3122 			 * packet. We haven't committed anything yet,
3123 			 * so just unload the DMA map, put the packet
3124 			 * back on the queue, and punt. 
Notify the upper 3125 * layer that there are no more slots left. 3126 */ 3127 DPRINTF(WM_DEBUG_TX, 3128 ("%s: TX: need %d (%d) descriptors, have %d\n", 3129 device_xname(sc->sc_dev), dmamap->dm_nsegs, 3130 segs_needed, sc->sc_txfree - 1)); 3131 ifp->if_flags |= IFF_OACTIVE; 3132 bus_dmamap_unload(sc->sc_dmat, dmamap); 3133 WM_EVCNT_INCR(&sc->sc_ev_txdstall); 3134 break; 3135 } 3136 3137 IFQ_DEQUEUE(&ifp->if_snd, m0); 3138 3139 /* 3140 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 3141 */ 3142 3143 DPRINTF(WM_DEBUG_TX, 3144 ("%s: TX: packet has %d (%d) DMA segments\n", 3145 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 3146 3147 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 3148 3149 /* 3150 * Store a pointer to the packet so that we can free it 3151 * later. 3152 * 3153 * Initially, we consider the number of descriptors the 3154 * packet uses the number of DMA segments. This may be 3155 * incremented by 1 if we do checksum offload (a descriptor 3156 * is used to set the checksum context). 3157 */ 3158 txs->txs_mbuf = m0; 3159 txs->txs_firstdesc = sc->sc_txnext; 3160 txs->txs_ndesc = segs_needed; 3161 3162 /* Set up offload parameters for this packet. */ 3163 uint32_t cmdlen, fields, dcmdlen; 3164 if (m0->m_pkthdr.csum_flags & 3165 (M_CSUM_TSOv4|M_CSUM_TSOv6| 3166 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 3167 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 3168 if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields, 3169 &do_csum) != 0) { 3170 /* Error message already displayed. */ 3171 bus_dmamap_unload(sc->sc_dmat, dmamap); 3172 continue; 3173 } 3174 } else { 3175 do_csum = false; 3176 cmdlen = 0; 3177 fields = 0; 3178 } 3179 3180 /* Sync the DMA map. */ 3181 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 3182 BUS_DMASYNC_PREWRITE); 3183 3184 /* 3185 * Initialize the first transmit descriptor. 3186 */ 3187 nexttx = sc->sc_txnext; 3188 if (!do_csum) { 3189 /* setup a legacy descriptor */ 3190 wm_set_dma_addr( 3191 &sc->sc_txdescs[nexttx].wtx_addr, 3192 dmamap->dm_segs[0].ds_addr); 3193 sc->sc_txdescs[nexttx].wtx_cmdlen = 3194 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); 3195 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0; 3196 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0; 3197 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != 3198 NULL) { 3199 sc->sc_txdescs[nexttx].wtx_cmdlen |= 3200 htole32(WTX_CMD_VLE); 3201 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 3202 htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 3203 } else { 3204 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 3205 } 3206 dcmdlen = 0; 3207 } else { 3208 /* setup an advanced data descriptor */ 3209 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr = 3210 htole64(dmamap->dm_segs[0].ds_addr); 3211 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); 3212 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen = 3213 htole32(dmamap->dm_segs[0].ds_len | cmdlen ); 3214 sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 3215 htole32(fields); 3216 DPRINTF(WM_DEBUG_TX, 3217 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", 3218 device_xname(sc->sc_dev), nexttx, 3219 (uint64_t)dmamap->dm_segs[0].ds_addr)); 3220 DPRINTF(WM_DEBUG_TX, 3221 ("\t 0x%08x%08x\n", fields, 3222 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); 3223 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; 3224 } 3225 3226 lasttx = nexttx; 3227 nexttx = WM_NEXTTX(sc, nexttx); 3228 /* 3229 * fill in the next descriptors. 
The legacy and advanced formats
3230 		 * are the same here.
3231 		 */
3232 		for (seg = 1; seg < dmamap->dm_nsegs;
3233 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3234 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3235 			    htole64(dmamap->dm_segs[seg].ds_addr);
3236 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3237 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3238 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3239 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3240 			lasttx = nexttx;
3241 
3242 			DPRINTF(WM_DEBUG_TX,
3243 			    ("%s: TX: desc %d: %#" PRIx64 ", "
3244 			     "len %#04zx\n",
3245 			    device_xname(sc->sc_dev), nexttx,
3246 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
3247 			    dmamap->dm_segs[seg].ds_len));
3248 		}
3249 
3250 		KASSERT(lasttx != -1);
3251 
3252 		/*
3253 		 * Set up the command byte on the last descriptor of
3254 		 * the packet. If we're in the interrupt delay window,
3255 		 * delay the interrupt.
3256 		 */
3257 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3258 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
3259 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
3260 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
3261 
3262 		txs->txs_lastdesc = lasttx;
3263 
3264 		DPRINTF(WM_DEBUG_TX,
3265 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
3266 		    device_xname(sc->sc_dev),
3267 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3268 
3269 		/* Sync the descriptors we're using. */
3270 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3271 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3272 
3273 		/* Give the packet to the chip. */
3274 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3275 		sent = true;
3276 
3277 		DPRINTF(WM_DEBUG_TX,
3278 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3279 
3280 		DPRINTF(WM_DEBUG_TX,
3281 		    ("%s: TX: finished transmitting packet, job %d\n",
3282 		    device_xname(sc->sc_dev), sc->sc_txsnext));
3283 
3284 		/* Advance the tx pointer. */
3285 		sc->sc_txfree -= txs->txs_ndesc;
3286 		sc->sc_txnext = nexttx;
3287 
3288 		sc->sc_txsfree--;
3289 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3290 
3291 		/* Pass the packet to any BPF listeners. */
3292 		bpf_mtap(ifp, m0);
3293 	}
3294 
3295 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3296 		/* No more slots; notify upper layer. */
3297 		ifp->if_flags |= IFF_OACTIVE;
3298 	}
3299 
3300 	if (sent) {
3301 		/* Set a watchdog timer in case the chip flakes out. */
3302 		ifp->if_timer = 5;
3303 	}
3304 }
3305 
3306 /*
3307  * wm_watchdog:		[ifnet interface function]
3308  *
3309  *	Watchdog timer handler.
3310  */
3311 static void
3312 wm_watchdog(struct ifnet *ifp)
3313 {
3314 	struct wm_softc *sc = ifp->if_softc;
3315 
3316 	/*
3317 	 * Since we're using delayed interrupts, sweep up
3318 	 * before we report an error. 
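	 * If wm_txintr() finds that the ring has drained after all,
	 * txfree returns to WM_NTXDESC(sc) and the timeout is treated
	 * as spurious.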
3319 */ 3320 wm_txintr(sc); 3321 3322 if (sc->sc_txfree != WM_NTXDESC(sc)) { 3323 #ifdef WM_DEBUG 3324 int i, j; 3325 struct wm_txsoft *txs; 3326 #endif 3327 log(LOG_ERR, 3328 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 3329 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree, 3330 sc->sc_txnext); 3331 ifp->if_oerrors++; 3332 #ifdef WM_DEBUG 3333 for (i = sc->sc_txsdirty; i != sc->sc_txsnext ; 3334 i = WM_NEXTTXS(sc, i)) { 3335 txs = &sc->sc_txsoft[i]; 3336 printf("txs %d tx %d -> %d\n", 3337 i, txs->txs_firstdesc, txs->txs_lastdesc); 3338 for (j = txs->txs_firstdesc; ; 3339 j = WM_NEXTTX(sc, j)) { 3340 printf("\tdesc %d: 0x%" PRIx64 "\n", j, 3341 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr); 3342 printf("\t %#08x%08x\n", 3343 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields, 3344 sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen); 3345 if (j == txs->txs_lastdesc) 3346 break; 3347 } 3348 } 3349 #endif 3350 /* Reset the interface. */ 3351 (void) wm_init(ifp); 3352 } 3353 3354 /* Try to get more packets going. */ 3355 ifp->if_start(ifp); 3356 } 3357 3358 static int 3359 wm_ifflags_cb(struct ethercom *ec) 3360 { 3361 struct ifnet *ifp = &ec->ec_if; 3362 struct wm_softc *sc = ifp->if_softc; 3363 int change = ifp->if_flags ^ sc->sc_if_flags; 3364 3365 if (change != 0) 3366 sc->sc_if_flags = ifp->if_flags; 3367 3368 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 3369 return ENETRESET; 3370 3371 if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3372 wm_set_filter(sc); 3373 3374 wm_set_vlan(sc); 3375 3376 return 0; 3377 } 3378 3379 /* 3380 * wm_ioctl: [ifnet interface function] 3381 * 3382 * Handle control requests from the operator. 3383 */ 3384 static int 3385 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3386 { 3387 struct wm_softc *sc = ifp->if_softc; 3388 struct ifreq *ifr = (struct ifreq *) data; 3389 struct ifaddr *ifa = (struct ifaddr *)data; 3390 struct sockaddr_dl *sdl; 3391 int s, error; 3392 3393 s = splnet(); 3394 3395 switch (cmd) { 3396 case SIOCSIFMEDIA: 3397 case SIOCGIFMEDIA: 3398 /* Flow control requires full-duplex mode. */ 3399 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3400 (ifr->ifr_media & IFM_FDX) == 0) 3401 ifr->ifr_media &= ~IFM_ETH_FMASK; 3402 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3403 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3404 /* We can do both TXPAUSE and RXPAUSE. */ 3405 ifr->ifr_media |= 3406 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3407 } 3408 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3409 } 3410 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 3411 break; 3412 case SIOCINITIFADDR: 3413 if (ifa->ifa_addr->sa_family == AF_LINK) { 3414 sdl = satosdl(ifp->if_dl->ifa_addr); 3415 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, 3416 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); 3417 /* unicast address is first multicast entry */ 3418 wm_set_filter(sc); 3419 error = 0; 3420 break; 3421 } 3422 /*FALLTHROUGH*/ 3423 default: 3424 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 3425 break; 3426 3427 error = 0; 3428 3429 if (cmd == SIOCSIFCAP) 3430 error = (*ifp->if_init)(ifp); 3431 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 3432 ; 3433 else if (ifp->if_flags & IFF_RUNNING) { 3434 /* 3435 * Multicast list has changed; set the hardware filter 3436 * accordingly. 3437 */ 3438 wm_set_filter(sc); 3439 } 3440 break; 3441 } 3442 3443 /* Try to get more packets going. */ 3444 ifp->if_start(ifp); 3445 3446 splx(s); 3447 return error; 3448 } 3449 3450 /* 3451 * wm_intr: 3452 * 3453 * Interrupt service routine. 
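 *
 *	Reading ICR acknowledges the asserted causes, so the handler
 *	loops until no cause we enabled (sc_icr) remains set.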
3454 */ 3455 static int 3456 wm_intr(void *arg) 3457 { 3458 struct wm_softc *sc = arg; 3459 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3460 uint32_t icr; 3461 int handled = 0; 3462 3463 while (1 /* CONSTCOND */) { 3464 icr = CSR_READ(sc, WMREG_ICR); 3465 if ((icr & sc->sc_icr) == 0) 3466 break; 3467 rnd_add_uint32(&sc->rnd_source, icr); 3468 3469 handled = 1; 3470 3471 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 3472 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 3473 DPRINTF(WM_DEBUG_RX, 3474 ("%s: RX: got Rx intr 0x%08x\n", 3475 device_xname(sc->sc_dev), 3476 icr & (ICR_RXDMT0|ICR_RXT0))); 3477 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 3478 } 3479 #endif 3480 wm_rxintr(sc); 3481 3482 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 3483 if (icr & ICR_TXDW) { 3484 DPRINTF(WM_DEBUG_TX, 3485 ("%s: TX: got TXDW interrupt\n", 3486 device_xname(sc->sc_dev))); 3487 WM_EVCNT_INCR(&sc->sc_ev_txdw); 3488 } 3489 #endif 3490 wm_txintr(sc); 3491 3492 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 3493 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 3494 wm_linkintr(sc, icr); 3495 } 3496 3497 if (icr & ICR_RXO) { 3498 #if defined(WM_DEBUG) 3499 log(LOG_WARNING, "%s: Receive overrun\n", 3500 device_xname(sc->sc_dev)); 3501 #endif /* defined(WM_DEBUG) */ 3502 } 3503 } 3504 3505 if (handled) { 3506 /* Try to get more packets going. */ 3507 ifp->if_start(ifp); 3508 } 3509 3510 return handled; 3511 } 3512 3513 /* 3514 * wm_txintr: 3515 * 3516 * Helper; handle transmit interrupts. 3517 */ 3518 static void 3519 wm_txintr(struct wm_softc *sc) 3520 { 3521 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3522 struct wm_txsoft *txs; 3523 uint8_t status; 3524 int i; 3525 3526 ifp->if_flags &= ~IFF_OACTIVE; 3527 3528 /* 3529 * Go through the Tx list and free mbufs for those 3530 * frames which have been transmitted. 3531 */ 3532 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 3533 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 3534 txs = &sc->sc_txsoft[i]; 3535 3536 DPRINTF(WM_DEBUG_TX, 3537 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i)); 3538 3539 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 3540 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3541 3542 status = 3543 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 3544 if ((status & WTX_ST_DD) == 0) { 3545 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 3546 BUS_DMASYNC_PREREAD); 3547 break; 3548 } 3549 3550 DPRINTF(WM_DEBUG_TX, 3551 ("%s: TX: job %d done: descs %d..%d\n", 3552 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 3553 txs->txs_lastdesc)); 3554 3555 /* 3556 * XXX We should probably be using the statistics 3557 * XXX registers, but I don't know if they exist 3558 * XXX on chips before the i82544. 3559 */ 3560 3561 #ifdef WM_EVENT_COUNTERS 3562 if (status & WTX_ST_TU) 3563 WM_EVCNT_INCR(&sc->sc_ev_tu); 3564 #endif /* WM_EVENT_COUNTERS */ 3565 3566 if (status & (WTX_ST_EC|WTX_ST_LC)) { 3567 ifp->if_oerrors++; 3568 if (status & WTX_ST_LC) 3569 log(LOG_WARNING, "%s: late collision\n", 3570 device_xname(sc->sc_dev)); 3571 else if (status & WTX_ST_EC) { 3572 ifp->if_collisions += 16; 3573 log(LOG_WARNING, "%s: excessive collisions\n", 3574 device_xname(sc->sc_dev)); 3575 } 3576 } else 3577 ifp->if_opackets++; 3578 3579 sc->sc_txfree += txs->txs_ndesc; 3580 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 3581 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3582 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3583 m_freem(txs->txs_mbuf); 3584 txs->txs_mbuf = NULL; 3585 } 3586 3587 /* Update the dirty transmit buffer pointer. 
*/ 3588 sc->sc_txsdirty = i; 3589 DPRINTF(WM_DEBUG_TX, 3590 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i)); 3591 3592 /* 3593 * If there are no more pending transmissions, cancel the watchdog 3594 * timer. 3595 */ 3596 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 3597 ifp->if_timer = 0; 3598 } 3599 3600 /* 3601 * wm_rxintr: 3602 * 3603 * Helper; handle receive interrupts. 3604 */ 3605 static void 3606 wm_rxintr(struct wm_softc *sc) 3607 { 3608 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3609 struct wm_rxsoft *rxs; 3610 struct mbuf *m; 3611 int i, len; 3612 uint8_t status, errors; 3613 uint16_t vlantag; 3614 3615 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 3616 rxs = &sc->sc_rxsoft[i]; 3617 3618 DPRINTF(WM_DEBUG_RX, 3619 ("%s: RX: checking descriptor %d\n", 3620 device_xname(sc->sc_dev), i)); 3621 3622 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 3623 3624 status = sc->sc_rxdescs[i].wrx_status; 3625 errors = sc->sc_rxdescs[i].wrx_errors; 3626 len = le16toh(sc->sc_rxdescs[i].wrx_len); 3627 vlantag = sc->sc_rxdescs[i].wrx_special; 3628 3629 if ((status & WRX_ST_DD) == 0) { 3630 /* 3631 * We have processed all of the receive descriptors. 3632 */ 3633 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 3634 break; 3635 } 3636 3637 if (__predict_false(sc->sc_rxdiscard)) { 3638 DPRINTF(WM_DEBUG_RX, 3639 ("%s: RX: discarding contents of descriptor %d\n", 3640 device_xname(sc->sc_dev), i)); 3641 WM_INIT_RXDESC(sc, i); 3642 if (status & WRX_ST_EOP) { 3643 /* Reset our state. */ 3644 DPRINTF(WM_DEBUG_RX, 3645 ("%s: RX: resetting rxdiscard -> 0\n", 3646 device_xname(sc->sc_dev))); 3647 sc->sc_rxdiscard = 0; 3648 } 3649 continue; 3650 } 3651 3652 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3653 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3654 3655 m = rxs->rxs_mbuf; 3656 3657 /* 3658 * Add a new receive buffer to the ring, unless of 3659 * course the length is zero. Treat the latter as a 3660 * failed mapping. 3661 */ 3662 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) { 3663 /* 3664 * Failed, throw away what we've done so 3665 * far, and discard the rest of the packet. 3666 */ 3667 ifp->if_ierrors++; 3668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 3670 WM_INIT_RXDESC(sc, i); 3671 if ((status & WRX_ST_EOP) == 0) 3672 sc->sc_rxdiscard = 1; 3673 if (sc->sc_rxhead != NULL) 3674 m_freem(sc->sc_rxhead); 3675 WM_RXCHAIN_RESET(sc); 3676 DPRINTF(WM_DEBUG_RX, 3677 ("%s: RX: Rx buffer allocation failed, " 3678 "dropping packet%s\n", device_xname(sc->sc_dev), 3679 sc->sc_rxdiscard ? " (discard)" : "")); 3680 continue; 3681 } 3682 3683 m->m_len = len; 3684 sc->sc_rxlen += len; 3685 DPRINTF(WM_DEBUG_RX, 3686 ("%s: RX: buffer at %p len %d\n", 3687 device_xname(sc->sc_dev), m->m_data, len)); 3688 3689 /* 3690 * If this is not the end of the packet, keep 3691 * looking. 3692 */ 3693 if ((status & WRX_ST_EOP) == 0) { 3694 WM_RXCHAIN_LINK(sc, m); 3695 DPRINTF(WM_DEBUG_RX, 3696 ("%s: RX: not yet EOP, rxlen -> %d\n", 3697 device_xname(sc->sc_dev), sc->sc_rxlen)); 3698 continue; 3699 } 3700 3701 /* 3702 * Okay, we have the entire packet now. The chip is 3703 * configured to include the FCS except on the I350 and I21[01] 3704 * (not all chips can be configured to strip it), 3705 * so we need to trim it. 3706 * We may need to adjust the length of the previous mbuf in the 3707 * chain if the current mbuf is too short. 3708 * Due to an erratum, the RCTL_SECRC bit in the RCTL register 3709 * is always set on the I350, so we don't trim the CRC there.
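 * (wm_init() sets RCTL_SECRC for those chips, so the hardware has
 * already stripped the CRC by the time we get here.)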
3710 */ 3711 if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210) 3712 && (sc->sc_type != WM_T_I211)) { 3713 if (m->m_len < ETHER_CRC_LEN) { 3714 sc->sc_rxtail->m_len 3715 -= (ETHER_CRC_LEN - m->m_len); 3716 m->m_len = 0; 3717 } else 3718 m->m_len -= ETHER_CRC_LEN; 3719 len = sc->sc_rxlen - ETHER_CRC_LEN; 3720 } else 3721 len = sc->sc_rxlen; 3722 3723 WM_RXCHAIN_LINK(sc, m); 3724 3725 *sc->sc_rxtailp = NULL; 3726 m = sc->sc_rxhead; 3727 3728 WM_RXCHAIN_RESET(sc); 3729 3730 DPRINTF(WM_DEBUG_RX, 3731 ("%s: RX: have entire packet, len -> %d\n", 3732 device_xname(sc->sc_dev), len)); 3733 3734 /* 3735 * If an error occurred, update stats and drop the packet. 3736 */ 3737 if (errors & 3738 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 3739 if (errors & WRX_ER_SE) 3740 log(LOG_WARNING, "%s: symbol error\n", 3741 device_xname(sc->sc_dev)); 3742 else if (errors & WRX_ER_SEQ) 3743 log(LOG_WARNING, "%s: receive sequence error\n", 3744 device_xname(sc->sc_dev)); 3745 else if (errors & WRX_ER_CE) 3746 log(LOG_WARNING, "%s: CRC error\n", 3747 device_xname(sc->sc_dev)); 3748 m_freem(m); 3749 continue; 3750 } 3751 3752 /* 3753 * No errors. Receive the packet. 3754 */ 3755 m->m_pkthdr.rcvif = ifp; 3756 m->m_pkthdr.len = len; 3757 3758 /* 3759 * If VLANs are enabled, VLAN packets have been unwrapped 3760 * for us. Associate the tag with the packet. 3761 */ 3762 if ((status & WRX_ST_VP) != 0) { 3763 VLAN_INPUT_TAG(ifp, m, 3764 le16toh(vlantag), 3765 continue); 3766 } 3767 3768 /* 3769 * Set up checksum info for this packet. 3770 */ 3771 if ((status & WRX_ST_IXSM) == 0) { 3772 if (status & WRX_ST_IPCS) { 3773 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 3774 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 3775 if (errors & WRX_ER_IPE) 3776 m->m_pkthdr.csum_flags |= 3777 M_CSUM_IPv4_BAD; 3778 } 3779 if (status & WRX_ST_TCPCS) { 3780 /* 3781 * Note: we don't know if this was TCP or UDP, 3782 * so we just set both bits, and expect the 3783 * upper layers to deal. 3784 */ 3785 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 3786 m->m_pkthdr.csum_flags |= 3787 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 3788 M_CSUM_TCPv6 | M_CSUM_UDPv6; 3789 if (errors & WRX_ER_TCPE) 3790 m->m_pkthdr.csum_flags |= 3791 M_CSUM_TCP_UDP_BAD; 3792 } 3793 } 3794 3795 ifp->if_ipackets++; 3796 3797 /* Pass this up to any BPF listeners. */ 3798 bpf_mtap(ifp, m); 3799 3800 /* Pass it on. */ 3801 (*ifp->if_input)(ifp, m); 3802 } 3803 3804 /* Update the receive pointer. */ 3805 sc->sc_rxptr = i; 3806 3807 DPRINTF(WM_DEBUG_RX, 3808 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); 3809 } 3810 3811 /* 3812 * wm_linkintr_gmii: 3813 * 3814 * Helper; handle link interrupts for GMII. 3815 */ 3816 static void 3817 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) 3818 { 3819 3820 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 3821 __func__)); 3822 3823 if (icr & ICR_LSC) { 3824 DPRINTF(WM_DEBUG_LINK, 3825 ("%s: LINK: LSC -> mii_pollstat\n", 3826 device_xname(sc->sc_dev))); 3827 mii_pollstat(&sc->sc_mii); 3828 if (sc->sc_type == WM_T_82543) { 3829 int miistatus, active; 3830 3831 /* 3832 * With 82543, we need to force speed and 3833 * duplex on the MAC equal to what the PHY 3834 * speed and duplex configuration is. 
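 * (The 82543 MAC does not pick up the resolved speed and duplex from
 * the PHY by itself, hence the CTRL_SPEED_* and CTRL_FD updates below.)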
3835 */ 3836 miistatus = sc->sc_mii.mii_media_status; 3837 3838 if (miistatus & IFM_ACTIVE) { 3839 active = sc->sc_mii.mii_media_active; 3840 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 3841 switch (IFM_SUBTYPE(active)) { 3842 case IFM_10_T: 3843 sc->sc_ctrl |= CTRL_SPEED_10; 3844 break; 3845 case IFM_100_TX: 3846 sc->sc_ctrl |= CTRL_SPEED_100; 3847 break; 3848 case IFM_1000_T: 3849 sc->sc_ctrl |= CTRL_SPEED_1000; 3850 break; 3851 default: 3852 /* 3853 * fiber? 3854 * Should not enter here. 3855 */ 3856 printf("unknown media (%x)\n", 3857 active); 3858 break; 3859 } 3860 if (active & IFM_FDX) 3861 sc->sc_ctrl |= CTRL_FD; 3862 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3863 } 3864 } else if ((sc->sc_type == WM_T_ICH8) 3865 && (sc->sc_phytype == WMPHY_IGP_3)) { 3866 wm_kmrn_lock_loss_workaround_ich8lan(sc); 3867 } else if (sc->sc_type == WM_T_PCH) { 3868 wm_k1_gig_workaround_hv(sc, 3869 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); 3870 } 3871 3872 if ((sc->sc_phytype == WMPHY_82578) 3873 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active) 3874 == IFM_1000_T)) { 3875 3876 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) { 3877 delay(200*1000); /* XXX too big */ 3878 3879 /* Link stall fix for link up */ 3880 wm_gmii_hv_writereg(sc->sc_dev, 1, 3881 HV_MUX_DATA_CTRL, 3882 HV_MUX_DATA_CTRL_GEN_TO_MAC 3883 | HV_MUX_DATA_CTRL_FORCE_SPEED); 3884 wm_gmii_hv_writereg(sc->sc_dev, 1, 3885 HV_MUX_DATA_CTRL, 3886 HV_MUX_DATA_CTRL_GEN_TO_MAC); 3887 } 3888 } 3889 } else if (icr & ICR_RXSEQ) { 3890 DPRINTF(WM_DEBUG_LINK, 3891 ("%s: LINK: Receive sequence error\n", 3892 device_xname(sc->sc_dev))); 3893 } 3894 } 3895 3896 /* 3897 * wm_linkintr_tbi: 3898 * 3899 * Helper; handle link interrupts for TBI mode. 3900 */ 3901 static void 3902 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) 3903 { 3904 uint32_t status; 3905 3906 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 3907 __func__)); 3908 3909 status = CSR_READ(sc, WMREG_STATUS); 3910 if (icr & ICR_LSC) { 3911 if (status & STATUS_LU) { 3912 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 3913 device_xname(sc->sc_dev), 3914 (status & STATUS_FD) ? "FDX" : "HDX")); 3915 /* 3916 * NOTE: the hardware updates TFCE and RFCE in CTRL 3917 * automatically, so re-read CTRL to refresh sc->sc_ctrl. 3918 */ 3919 3920 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 3921 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 3922 sc->sc_fcrtl &= ~FCRTL_XONE; 3923 if (status & STATUS_FD) 3924 sc->sc_tctl |= 3925 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3926 else 3927 sc->sc_tctl |= 3928 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 3929 if (sc->sc_ctrl & CTRL_TFCE) 3930 sc->sc_fcrtl |= FCRTL_XONE; 3931 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3932 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 3933 WMREG_OLD_FCRTL : WMREG_FCRTL, 3934 sc->sc_fcrtl); 3935 sc->sc_tbi_linkup = 1; 3936 } else { 3937 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 3938 device_xname(sc->sc_dev))); 3939 sc->sc_tbi_linkup = 0; 3940 } 3941 wm_tbi_set_linkled(sc); 3942 } else if (icr & ICR_RXCFG) { 3943 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 3944 device_xname(sc->sc_dev))); 3945 sc->sc_tbi_nrxcfg++; 3946 wm_check_for_link(sc); 3947 } else if (icr & ICR_RXSEQ) { 3948 DPRINTF(WM_DEBUG_LINK, 3949 ("%s: LINK: Receive sequence error\n", 3950 device_xname(sc->sc_dev))); 3951 } 3952 } 3953 3954 /* 3955 * wm_linkintr: 3956 * 3957 * Helper; handle link interrupts.
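 * Dispatches to wm_linkintr_gmii() or wm_linkintr_tbi() depending on
 * whether the device has an MII PHY (WM_F_HAS_MII).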
3958 */ 3959 static void 3960 wm_linkintr(struct wm_softc *sc, uint32_t icr) 3961 { 3962 3963 if (sc->sc_flags & WM_F_HAS_MII) 3964 wm_linkintr_gmii(sc, icr); 3965 else 3966 wm_linkintr_tbi(sc, icr); 3967 } 3968 3969 /* 3970 * wm_tick: 3971 * 3972 * One second timer, used to check link status, sweep up 3973 * completed transmit jobs, etc. 3974 */ 3975 static void 3976 wm_tick(void *arg) 3977 { 3978 struct wm_softc *sc = arg; 3979 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3980 int s; 3981 3982 s = splnet(); 3983 3984 if (sc->sc_type >= WM_T_82542_2_1) { 3985 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 3986 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 3987 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 3988 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 3989 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 3990 } 3991 3992 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 3993 ifp->if_ierrors += 0ULL + /* ensure quad_t */ 3994 + CSR_READ(sc, WMREG_CRCERRS) 3995 + CSR_READ(sc, WMREG_ALGNERRC) 3996 + CSR_READ(sc, WMREG_SYMERRC) 3997 + CSR_READ(sc, WMREG_RXERRC) 3998 + CSR_READ(sc, WMREG_SEC) 3999 + CSR_READ(sc, WMREG_CEXTERR) 4000 + CSR_READ(sc, WMREG_RLEC); 4001 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC); 4002 4003 if (sc->sc_flags & WM_F_HAS_MII) 4004 mii_tick(&sc->sc_mii); 4005 else 4006 wm_tbi_check_link(sc); 4007 4008 splx(s); 4009 4010 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 4011 } 4012 4013 /* 4014 * wm_reset: 4015 * 4016 * Reset the i8254x chip. 4017 */ 4018 static void 4019 wm_reset(struct wm_softc *sc) 4020 { 4021 int phy_reset = 0; 4022 uint32_t reg, mask; 4023 4024 /* 4025 * Allocate on-chip memory according to the MTU size. 4026 * The Packet Buffer Allocation register must be written 4027 * before the chip is reset. 4028 */ 4029 switch (sc->sc_type) { 4030 case WM_T_82547: 4031 case WM_T_82547_2: 4032 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 4033 PBA_22K : PBA_30K; 4034 sc->sc_txfifo_head = 0; 4035 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 4036 sc->sc_txfifo_size = 4037 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 4038 sc->sc_txfifo_stall = 0; 4039 break; 4040 case WM_T_82571: 4041 case WM_T_82572: 4042 case WM_T_82575: /* XXX need special handling for jumbo frames */ 4043 case WM_T_I350: 4044 case WM_T_80003: 4045 sc->sc_pba = PBA_32K; 4046 break; 4047 case WM_T_82580: 4048 case WM_T_82580ER: 4049 sc->sc_pba = PBA_35K; 4050 break; 4051 case WM_T_I210: 4052 case WM_T_I211: 4053 sc->sc_pba = PBA_34K; 4054 break; 4055 case WM_T_82576: 4056 sc->sc_pba = PBA_64K; 4057 break; 4058 case WM_T_82573: 4059 sc->sc_pba = PBA_12K; 4060 break; 4061 case WM_T_82574: 4062 case WM_T_82583: 4063 sc->sc_pba = PBA_20K; 4064 break; 4065 case WM_T_ICH8: 4066 sc->sc_pba = PBA_8K; 4067 CSR_WRITE(sc, WMREG_PBS, PBA_16K); 4068 break; 4069 case WM_T_ICH9: 4070 case WM_T_ICH10: 4071 sc->sc_pba = PBA_10K; 4072 break; 4073 case WM_T_PCH: 4074 case WM_T_PCH2: 4075 case WM_T_PCH_LPT: 4076 sc->sc_pba = PBA_26K; 4077 break; 4078 default: 4079 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4080 PBA_40K : PBA_48K; 4081 break; 4082 } 4083 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 4084 4085 /* Prevent the PCI-E bus from sticking */ 4086 if (sc->sc_flags & WM_F_PCIE) { 4087 int timeout = 800; 4088 4089 sc->sc_ctrl |= CTRL_GIO_M_DIS; 4090 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4091 4092 while (timeout--) { 4093 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) 4094 == 0) 4095 break; 4096 delay(100); 4097 } 4098 } 4099 4100 /* Set the completion timeout for the interface */ 4101 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 4102 || (sc->sc_type == WM_T_I350)) 4103 wm_set_pcie_completion_timeout(sc); 4104 4105 /* Clear interrupt */ 4106 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4107 4108 /* Stop the transmit and receive processes. */ 4109 CSR_WRITE(sc, WMREG_RCTL, 0); 4110 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); 4111 sc->sc_rctl &= ~RCTL_EN; 4112 4113 /* XXX set_tbi_sbp_82543() */ 4114 4115 delay(10*1000); 4116 4117 /* Must acquire the MDIO ownership before MAC reset */ 4118 switch (sc->sc_type) { 4119 case WM_T_82573: 4120 case WM_T_82574: 4121 case WM_T_82583: 4122 wm_get_hw_semaphore_82573(sc); 4123 break; 4124 default: 4125 break; 4126 } 4127 4128 /* 4129 * 82541 Errata 29? & 82547 Errata 28? 4130 * See also the description of the PHY_RST bit in the CTRL register 4131 * in 8254x_GBe_SDM.pdf. 4132 */ 4133 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { 4134 CSR_WRITE(sc, WMREG_CTRL, 4135 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); 4136 delay(5000); 4137 } 4138 4139 switch (sc->sc_type) { 4140 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ 4141 case WM_T_82541: 4142 case WM_T_82541_2: 4143 case WM_T_82547: 4144 case WM_T_82547_2: 4145 /* 4146 * On some chipsets, a reset through a memory-mapped write 4147 * cycle can cause the chip to reset before completing the 4148 * write cycle. This causes major headaches that can be 4149 * avoided by issuing the reset via indirect register writes 4150 * through I/O space. 4151 * 4152 * So, if we successfully mapped the I/O BAR at attach time, 4153 * use that. Otherwise, try our luck with a memory-mapped 4154 * reset. 4155 */ 4156 if (sc->sc_flags & WM_F_IOH_VALID) 4157 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 4158 else 4159 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 4160 break; 4161 case WM_T_82545_3: 4162 case WM_T_82546_3: 4163 /* Use the shadow control register on these chips.
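 * (The reset is issued through WMREG_CTRL_SHADOW rather than
 * WMREG_CTRL; presumably this sidesteps the premature-reset problem
 * described above.)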
*/ 4164 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 4165 break; 4166 case WM_T_80003: 4167 mask = swfwphysem[sc->sc_funcid]; 4168 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 4169 wm_get_swfw_semaphore(sc, mask); 4170 CSR_WRITE(sc, WMREG_CTRL, reg); 4171 wm_put_swfw_semaphore(sc, mask); 4172 break; 4173 case WM_T_ICH8: 4174 case WM_T_ICH9: 4175 case WM_T_ICH10: 4176 case WM_T_PCH: 4177 case WM_T_PCH2: 4178 case WM_T_PCH_LPT: 4179 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 4180 if (wm_check_reset_block(sc) == 0) { 4181 /* 4182 * Gate automatic PHY configuration by hardware on 4183 * non-managed 82579 4184 */ 4185 if ((sc->sc_type == WM_T_PCH2) 4186 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) 4187 != 0)) 4188 wm_gate_hw_phy_config_ich8lan(sc, 1); 4189 4190 4191 reg |= CTRL_PHY_RESET; 4192 phy_reset = 1; 4193 } 4194 wm_get_swfwhw_semaphore(sc); 4195 CSR_WRITE(sc, WMREG_CTRL, reg); 4196 delay(20*1000); 4197 wm_put_swfwhw_semaphore(sc); 4198 break; 4199 case WM_T_82542_2_0: 4200 case WM_T_82542_2_1: 4201 case WM_T_82543: 4202 case WM_T_82540: 4203 case WM_T_82545: 4204 case WM_T_82546: 4205 case WM_T_82571: 4206 case WM_T_82572: 4207 case WM_T_82573: 4208 case WM_T_82574: 4209 case WM_T_82575: 4210 case WM_T_82576: 4211 case WM_T_82580: 4212 case WM_T_82580ER: 4213 case WM_T_82583: 4214 case WM_T_I350: 4215 case WM_T_I210: 4216 case WM_T_I211: 4217 default: 4218 /* Everything else can safely use the documented method. */ 4219 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 4220 break; 4221 } 4222 4223 /* Must release the MDIO ownership after MAC reset */ 4224 switch (sc->sc_type) { 4225 case WM_T_82574: 4226 case WM_T_82583: 4227 wm_put_hw_semaphore_82573(sc); 4228 break; 4229 default: 4230 break; 4231 } 4232 4233 if (phy_reset != 0) 4234 wm_get_cfg_done(sc); 4235 4236 /* reload EEPROM */ 4237 switch (sc->sc_type) { 4238 case WM_T_82542_2_0: 4239 case WM_T_82542_2_1: 4240 case WM_T_82543: 4241 case WM_T_82544: 4242 delay(10); 4243 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 4244 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4245 delay(2000); 4246 break; 4247 case WM_T_82540: 4248 case WM_T_82545: 4249 case WM_T_82545_3: 4250 case WM_T_82546: 4251 case WM_T_82546_3: 4252 delay(5*1000); 4253 /* XXX Disable HW ARPs on ASF enabled adapters */ 4254 break; 4255 case WM_T_82541: 4256 case WM_T_82541_2: 4257 case WM_T_82547: 4258 case WM_T_82547_2: 4259 delay(20000); 4260 /* XXX Disable HW ARPs on ASF enabled adapters */ 4261 break; 4262 case WM_T_82571: 4263 case WM_T_82572: 4264 case WM_T_82573: 4265 case WM_T_82574: 4266 case WM_T_82583: 4267 if (sc->sc_flags & WM_F_EEPROM_FLASH) { 4268 delay(10); 4269 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 4270 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4271 } 4272 /* check EECD_EE_AUTORD */ 4273 wm_get_auto_rd_done(sc); 4274 /* 4275 * Phy configuration from NVM just starts after EECD_AUTO_RD 4276 * is set. 
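 * so the 82573/82574/82583 get an extra 25 ms below for that
 * configuration to finish.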
4277 */ 4278 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) 4279 || (sc->sc_type == WM_T_82583)) 4280 delay(25*1000); 4281 break; 4282 case WM_T_82575: 4283 case WM_T_82576: 4284 case WM_T_82580: 4285 case WM_T_82580ER: 4286 case WM_T_I350: 4287 case WM_T_I210: 4288 case WM_T_I211: 4289 case WM_T_80003: 4290 /* check EECD_EE_AUTORD */ 4291 wm_get_auto_rd_done(sc); 4292 break; 4293 case WM_T_ICH8: 4294 case WM_T_ICH9: 4295 case WM_T_ICH10: 4296 case WM_T_PCH: 4297 case WM_T_PCH2: 4298 case WM_T_PCH_LPT: 4299 break; 4300 default: 4301 panic("%s: unknown type\n", __func__); 4302 } 4303 4304 /* Check whether EEPROM is present or not */ 4305 switch (sc->sc_type) { 4306 case WM_T_82575: 4307 case WM_T_82576: 4308 #if 0 /* XXX */ 4309 case WM_T_82580: 4310 case WM_T_82580ER: 4311 #endif 4312 case WM_T_I350: 4313 case WM_T_ICH8: 4314 case WM_T_ICH9: 4315 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { 4316 /* Not found */ 4317 sc->sc_flags |= WM_F_EEPROM_INVALID; 4318 if ((sc->sc_type == WM_T_82575) 4319 || (sc->sc_type == WM_T_82576) 4320 || (sc->sc_type == WM_T_82580) 4321 || (sc->sc_type == WM_T_82580ER) 4322 || (sc->sc_type == WM_T_I350)) 4323 wm_reset_init_script_82575(sc); 4324 } 4325 break; 4326 default: 4327 break; 4328 } 4329 4330 if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) 4331 || (sc->sc_type == WM_T_I350)) { 4332 /* clear global device reset status bit */ 4333 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); 4334 } 4335 4336 /* Clear any pending interrupt events. */ 4337 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4338 reg = CSR_READ(sc, WMREG_ICR); 4339 4340 /* reload sc_ctrl */ 4341 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 4342 4343 if (sc->sc_type == WM_T_I350) 4344 wm_set_eee_i350(sc); 4345 4346 /* dummy read from WUC */ 4347 if (sc->sc_type == WM_T_PCH) 4348 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC); 4349 /* 4350 * For PCH, this write ensures that any noise is detected 4351 * as a CRC error and dropped rather than showing up as a bad packet 4352 * to the DMA engine. 4353 */ 4354 if (sc->sc_type == WM_T_PCH) 4355 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); 4356 4357 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4358 CSR_WRITE(sc, WMREG_WUC, 0); 4359 4360 /* XXX need special handling for 82580 */ 4361 } 4362 4363 static void 4364 wm_set_vlan(struct wm_softc *sc) 4365 { 4366 /* Deal with VLAN enables. */ 4367 if (VLAN_ATTACHED(&sc->sc_ethercom)) 4368 sc->sc_ctrl |= CTRL_VME; 4369 else 4370 sc->sc_ctrl &= ~CTRL_VME; 4371 4372 /* Write the control register. */ 4373 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4374 } 4375 4376 /* 4377 * wm_init: [ifnet interface function] 4378 * 4379 * Initialize the interface. Must be called at splnet(). 4380 */ 4381 static int 4382 wm_init(struct ifnet *ifp) 4383 { 4384 struct wm_softc *sc = ifp->if_softc; 4385 struct wm_rxsoft *rxs; 4386 int i, j, trynum, error = 0; 4387 uint32_t reg; 4388 4389 /* 4390 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. 4391 * There is a small but measurable benefit to avoiding the adjustment 4392 * of the descriptor so that the headers are aligned, for a normal MTU, 4393 * on such platforms. One possibility is that the DMA itself is 4394 * slightly more efficient if the front of the entire packet (instead 4395 * of the front of the headers) is aligned. 4396 * 4397 * Note we must always set align_tweak to 0 if we are using 4398 * jumbo frames.
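 * (The two-byte tweak works because ETHER_HDR_LEN is 14: shifting the
 * frame by 2 lands the IP header on a 4-byte boundary on
 * strict-alignment machines.)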
4399 */ 4400 #ifdef __NO_STRICT_ALIGNMENT 4401 sc->sc_align_tweak = 0; 4402 #else 4403 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 4404 sc->sc_align_tweak = 0; 4405 else 4406 sc->sc_align_tweak = 2; 4407 #endif /* __NO_STRICT_ALIGNMENT */ 4408 4409 /* Cancel any pending I/O. */ 4410 wm_stop(ifp, 0); 4411 4412 /* update statistics before reset */ 4413 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 4414 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 4415 4416 /* Reset the chip to a known state. */ 4417 wm_reset(sc); 4418 4419 switch (sc->sc_type) { 4420 case WM_T_82571: 4421 case WM_T_82572: 4422 case WM_T_82573: 4423 case WM_T_82574: 4424 case WM_T_82583: 4425 case WM_T_80003: 4426 case WM_T_ICH8: 4427 case WM_T_ICH9: 4428 case WM_T_ICH10: 4429 case WM_T_PCH: 4430 case WM_T_PCH2: 4431 case WM_T_PCH_LPT: 4432 if (wm_check_mng_mode(sc) != 0) 4433 wm_get_hw_control(sc); 4434 break; 4435 default: 4436 break; 4437 } 4438 4439 /* Reset the PHY. */ 4440 if (sc->sc_flags & WM_F_HAS_MII) 4441 wm_gmii_reset(sc); 4442 4443 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4444 /* Enable PHY low-power state when MAC is at D3 w/o WoL */ 4445 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 4446 || (sc->sc_type == WM_T_PCH_LPT)) 4447 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN); 4448 4449 /* Initialize the transmit descriptor ring. */ 4450 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 4451 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 4452 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 4453 sc->sc_txfree = WM_NTXDESC(sc); 4454 sc->sc_txnext = 0; 4455 4456 if (sc->sc_type < WM_T_82543) { 4457 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4458 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4459 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 4460 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 4461 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 4462 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 4463 } else { 4464 CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0)); 4465 CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0)); 4466 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 4467 CSR_WRITE(sc, WMREG_TDH, 0); 4468 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ 4469 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ 4470 4471 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4472 /* 4473 * Don't write TDT before TCTL.EN is set. 4474 * See the document. 4475 */ 4476 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE 4477 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 4478 | TXDCTL_WTHRESH(0)); 4479 else { 4480 CSR_WRITE(sc, WMREG_TDT, 0); 4481 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 4482 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 4483 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 4484 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 4485 } 4486 } 4487 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 4488 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 4489 4490 /* Initialize the transmit job descriptors. */ 4491 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 4492 sc->sc_txsoft[i].txs_mbuf = NULL; 4493 sc->sc_txsfree = WM_TXQUEUELEN(sc); 4494 sc->sc_txsnext = 0; 4495 sc->sc_txsdirty = 0; 4496 4497 /* 4498 * Initialize the receive descriptor and receive job 4499 * descriptor rings. 
4500 */ 4501 if (sc->sc_type < WM_T_82543) { 4502 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 4503 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 4504 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 4505 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 4506 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 4507 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 4508 4509 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 4510 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 4511 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 4512 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 4513 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 4514 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 4515 } else { 4516 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 4517 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 4518 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 4519 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4520 CSR_WRITE(sc, WMREG_EITR(0), 450); 4521 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 4522 panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES); 4523 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY 4524 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 4525 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE 4526 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 4527 | RXDCTL_WTHRESH(1)); 4528 } else { 4529 CSR_WRITE(sc, WMREG_RDH, 0); 4530 CSR_WRITE(sc, WMREG_RDT, 0); 4531 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ 4532 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ 4533 } 4534 } 4535 for (i = 0; i < WM_NRXDESC; i++) { 4536 rxs = &sc->sc_rxsoft[i]; 4537 if (rxs->rxs_mbuf == NULL) { 4538 if ((error = wm_add_rxbuf(sc, i)) != 0) { 4539 log(LOG_ERR, "%s: unable to allocate or map " 4540 "rx buffer %d, error = %d\n", 4541 device_xname(sc->sc_dev), i, error); 4542 /* 4543 * XXX Should attempt to run with fewer receive 4544 * XXX buffers instead of just failing. 4545 */ 4546 wm_rxdrain(sc); 4547 goto out; 4548 } 4549 } else { 4550 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 4551 WM_INIT_RXDESC(sc, i); 4552 /* 4553 * For 82575 and newer devices, the RX descriptors 4554 * must be initialized after the setting of RCTL.EN in 4555 * wm_set_filter(). 4556 */ 4557 } 4558 } 4559 sc->sc_rxptr = 0; 4560 sc->sc_rxdiscard = 0; 4561 WM_RXCHAIN_RESET(sc); 4562 4563 /* 4564 * Clear out the VLAN table -- we don't use it (yet). 4565 */ 4566 CSR_WRITE(sc, WMREG_VET, 0); 4567 if (sc->sc_type == WM_T_I350) 4568 trynum = 10; /* Due to hw errata */ 4569 else 4570 trynum = 1; 4571 for (i = 0; i < WM_VLAN_TABSIZE; i++) 4572 for (j = 0; j < trynum; j++) 4573 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 4574 4575 /* 4576 * Set up flow-control parameters. 4577 * 4578 * XXX Values could probably stand some tuning. 4579 */ 4580 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 4581 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) 4582 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) { 4583 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 4584 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 4585 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 4586 } 4587 4588 sc->sc_fcrtl = FCRTL_DFLT; 4589 if (sc->sc_type < WM_T_82543) { 4590 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 4591 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 4592 } else { 4593 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 4594 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 4595 } 4596 4597 if (sc->sc_type == WM_T_80003) 4598 CSR_WRITE(sc, WMREG_FCTTV, 0xffff); 4599 else 4600 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 4601 4602 /* Write the control register.
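 * wm_set_vlan() folds the VLAN enable bit (CTRL_VME) into sc_ctrl
 * and writes CTRL back to the hardware.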
*/ 4603 wm_set_vlan(sc); 4604 4605 if (sc->sc_flags & WM_F_HAS_MII) { 4606 int val; 4607 4608 switch (sc->sc_type) { 4609 case WM_T_80003: 4610 case WM_T_ICH8: 4611 case WM_T_ICH9: 4612 case WM_T_ICH10: 4613 case WM_T_PCH: 4614 case WM_T_PCH2: 4615 case WM_T_PCH_LPT: 4616 /* 4617 * Set the mac to wait the maximum time between each 4618 * iteration and increase the max iterations when 4619 * polling the phy; this fixes erroneous timeouts at 4620 * 10Mbps. 4621 */ 4622 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 4623 0xFFFF); 4624 val = wm_kmrn_readreg(sc, 4625 KUMCTRLSTA_OFFSET_INB_PARAM); 4626 val |= 0x3F; 4627 wm_kmrn_writereg(sc, 4628 KUMCTRLSTA_OFFSET_INB_PARAM, val); 4629 break; 4630 default: 4631 break; 4632 } 4633 4634 if (sc->sc_type == WM_T_80003) { 4635 val = CSR_READ(sc, WMREG_CTRL_EXT); 4636 val &= ~CTRL_EXT_LINK_MODE_MASK; 4637 CSR_WRITE(sc, WMREG_CTRL_EXT, val); 4638 4639 /* Bypass RX and TX FIFO's */ 4640 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 4641 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 4642 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 4643 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 4644 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 4645 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 4646 } 4647 } 4648 #if 0 4649 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 4650 #endif 4651 4652 /* 4653 * Set up checksum offload parameters. 4654 */ 4655 reg = CSR_READ(sc, WMREG_RXCSUM); 4656 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 4657 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 4658 reg |= RXCSUM_IPOFL; 4659 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 4660 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 4661 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 4662 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 4663 CSR_WRITE(sc, WMREG_RXCSUM, reg); 4664 4665 /* Reset TBI's RXCFG count */ 4666 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0; 4667 4668 /* 4669 * Set up the interrupt registers. 4670 */ 4671 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4672 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 4673 ICR_RXO | ICR_RXT0; 4674 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 4675 sc->sc_icr |= ICR_RXCFG; 4676 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 4677 4678 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4679 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 4680 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 4681 reg = CSR_READ(sc, WMREG_KABGTXD); 4682 reg |= KABGTXD_BGSQLBIAS; 4683 CSR_WRITE(sc, WMREG_KABGTXD, reg); 4684 } 4685 4686 /* Set up the inter-packet gap. */ 4687 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 4688 4689 if (sc->sc_type >= WM_T_82543) { 4690 /* 4691 * Set up the interrupt throttling register (units of 256ns) 4692 * Note that a footnote in Intel's documentation says this 4693 * ticker runs at 1/4 the rate when the chip is in 100Mbit 4694 * or 10Mbit mode. Empirically, it appears to be the case 4695 * that that is also true for the 1024ns units of the other 4696 * interrupt-related timer registers -- so, really, we ought 4697 * to divide this value by 4 when the link speed is low. 4698 * 4699 * XXX implement this division at link speed change! 4700 */ 4701 4702 /* 4703 * For N interrupts/sec, set this value to: 4704 * 1000000000 / (N * 256). Note that we set the 4705 * absolute and packet timer values to this value 4706 * divided by 4 to get "simple timer" behavior. 4707 */ 4708 4709 sc->sc_itr = 1500; /* 2604 ints/sec */ 4710 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); 4711 } 4712 4713 /* Set the VLAN ethernetype. 
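 * (VET is loaded with ETHERTYPE_VLAN, 0x8100.)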
*/ 4714 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 4715 4716 /* 4717 * Set up the transmit control register; we start out with 4718 * a collision distance suitable for FDX, but update it when 4719 * we resolve the media type. 4720 */ 4721 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC 4722 | TCTL_CT(TX_COLLISION_THRESHOLD) 4723 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4724 if (sc->sc_type >= WM_T_82571) 4725 sc->sc_tctl |= TCTL_MULR; 4726 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 4727 4728 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 4729 /* 4730 * Write TDT after TCTL.EN is set. 4731 * See the document. 4732 */ 4733 CSR_WRITE(sc, WMREG_TDT, 0); 4734 } 4735 4736 if (sc->sc_type == WM_T_80003) { 4737 reg = CSR_READ(sc, WMREG_TCTL_EXT); 4738 reg &= ~TCTL_EXT_GCEX_MASK; 4739 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; 4740 CSR_WRITE(sc, WMREG_TCTL_EXT, reg); 4741 } 4742 4743 /* Set the media. */ 4744 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) 4745 goto out; 4746 4747 /* Configure for OS presence */ 4748 wm_init_manageability(sc); 4749 4750 /* 4751 * Set up the receive control register; we actually program 4752 * the register when we set the receive filter. Use multicast 4753 * address offset type 0. 4754 * 4755 * Only the i82544 has the ability to strip the incoming 4756 * CRC, so we don't enable that feature. 4757 */ 4758 sc->sc_mchash_type = 0; 4759 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 4760 | RCTL_MO(sc->sc_mchash_type); 4761 4762 /* 4763 * The I350 has a bug where it always strips the CRC whether 4764 * asked to or not. So ask for stripped CRC here and cope in wm_rxintr(). 4765 */ 4766 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)) 4767 sc->sc_rctl |= RCTL_SECRC; 4768 4769 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) 4770 && (ifp->if_mtu > ETHERMTU)) { 4771 sc->sc_rctl |= RCTL_LPE; 4772 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4773 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); 4774 } 4775 4776 if (MCLBYTES == 2048) { 4777 sc->sc_rctl |= RCTL_2k; 4778 } else { 4779 if (sc->sc_type >= WM_T_82543) { 4780 switch (MCLBYTES) { 4781 case 4096: 4782 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 4783 break; 4784 case 8192: 4785 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 4786 break; 4787 case 16384: 4788 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 4789 break; 4790 default: 4791 panic("wm_init: MCLBYTES %d unsupported", 4792 MCLBYTES); 4793 break; 4794 } 4795 } else panic("wm_init: i82542 requires MCLBYTES = 2048"); 4796 } 4797 4798 /* Set the receive filter. */ 4799 wm_set_filter(sc); 4800 4801 /* Enable ECC */ 4802 switch (sc->sc_type) { 4803 case WM_T_82571: 4804 reg = CSR_READ(sc, WMREG_PBA_ECC); 4805 reg |= PBA_ECC_CORR_EN; 4806 CSR_WRITE(sc, WMREG_PBA_ECC, reg); 4807 break; 4808 case WM_T_PCH_LPT: 4809 reg = CSR_READ(sc, WMREG_PBECCSTS); 4810 reg |= PBECCSTS_UNCORR_ECC_ENABLE; 4811 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 4812 4813 reg = CSR_READ(sc, WMREG_CTRL); 4814 reg |= CTRL_MEHE; 4815 CSR_WRITE(sc, WMREG_CTRL, reg); 4816 break; 4817 default: 4818 break; 4819 } 4820 4821 /* On 82575 and later set RDT only if RX enabled */ 4822 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 4823 for (i = 0; i < WM_NRXDESC; i++) 4824 WM_INIT_RXDESC(sc, i); 4825 4826 /* Start the one second link check clock. */ 4827 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 4828 4829 /* ...all done!
*/ 4830 ifp->if_flags |= IFF_RUNNING; 4831 ifp->if_flags &= ~IFF_OACTIVE; 4832 4833 out: 4834 sc->sc_if_flags = ifp->if_flags; 4835 if (error) 4836 log(LOG_ERR, "%s: interface not running\n", 4837 device_xname(sc->sc_dev)); 4838 return error; 4839 } 4840 4841 /* 4842 * wm_rxdrain: 4843 * 4844 * Drain the receive queue. 4845 */ 4846 static void 4847 wm_rxdrain(struct wm_softc *sc) 4848 { 4849 struct wm_rxsoft *rxs; 4850 int i; 4851 4852 for (i = 0; i < WM_NRXDESC; i++) { 4853 rxs = &sc->sc_rxsoft[i]; 4854 if (rxs->rxs_mbuf != NULL) { 4855 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 4856 m_freem(rxs->rxs_mbuf); 4857 rxs->rxs_mbuf = NULL; 4858 } 4859 } 4860 } 4861 4862 /* 4863 * wm_stop: [ifnet interface function] 4864 * 4865 * Stop transmission on the interface. 4866 */ 4867 static void 4868 wm_stop(struct ifnet *ifp, int disable) 4869 { 4870 struct wm_softc *sc = ifp->if_softc; 4871 struct wm_txsoft *txs; 4872 int i; 4873 4874 /* Stop the one second clock. */ 4875 callout_stop(&sc->sc_tick_ch); 4876 4877 /* Stop the 82547 Tx FIFO stall check timer. */ 4878 if (sc->sc_type == WM_T_82547) 4879 callout_stop(&sc->sc_txfifo_ch); 4880 4881 if (sc->sc_flags & WM_F_HAS_MII) { 4882 /* Down the MII. */ 4883 mii_down(&sc->sc_mii); 4884 } else { 4885 #if 0 4886 /* Should we clear PHY's status properly? */ 4887 wm_reset(sc); 4888 #endif 4889 } 4890 4891 /* Stop the transmit and receive processes. */ 4892 CSR_WRITE(sc, WMREG_TCTL, 0); 4893 CSR_WRITE(sc, WMREG_RCTL, 0); 4894 sc->sc_rctl &= ~RCTL_EN; 4895 4896 /* 4897 * Clear the interrupt mask to ensure the device cannot assert its 4898 * interrupt line. 4899 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service 4900 * any currently pending or shared interrupt. 4901 */ 4902 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 4903 sc->sc_icr = 0; 4904 4905 /* Release any queued transmit buffers. */ 4906 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 4907 txs = &sc->sc_txsoft[i]; 4908 if (txs->txs_mbuf != NULL) { 4909 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 4910 m_freem(txs->txs_mbuf); 4911 txs->txs_mbuf = NULL; 4912 } 4913 } 4914 4915 /* Mark the interface as down and cancel the watchdog timer. 
*/ 4916 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 4917 ifp->if_timer = 0; 4918 4919 if (disable) 4920 wm_rxdrain(sc); 4921 4922 #if 0 /* notyet */ 4923 if (sc->sc_type >= WM_T_82544) 4924 CSR_WRITE(sc, WMREG_WUC, 0); 4925 #endif 4926 } 4927 4928 void 4929 wm_get_auto_rd_done(struct wm_softc *sc) 4930 { 4931 int i; 4932 4933 /* wait for eeprom to reload */ 4934 switch (sc->sc_type) { 4935 case WM_T_82571: 4936 case WM_T_82572: 4937 case WM_T_82573: 4938 case WM_T_82574: 4939 case WM_T_82583: 4940 case WM_T_82575: 4941 case WM_T_82576: 4942 case WM_T_82580: 4943 case WM_T_82580ER: 4944 case WM_T_I350: 4945 case WM_T_I210: 4946 case WM_T_I211: 4947 case WM_T_80003: 4948 case WM_T_ICH8: 4949 case WM_T_ICH9: 4950 for (i = 0; i < 10; i++) { 4951 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 4952 break; 4953 delay(1000); 4954 } 4955 if (i == 10) { 4956 log(LOG_ERR, "%s: auto read from eeprom failed to " 4957 "complete\n", device_xname(sc->sc_dev)); 4958 } 4959 break; 4960 default: 4961 break; 4962 } 4963 } 4964 4965 void 4966 wm_lan_init_done(struct wm_softc *sc) 4967 { 4968 uint32_t reg = 0; 4969 int i; 4970 4971 /* wait for eeprom to reload */ 4972 switch (sc->sc_type) { 4973 case WM_T_ICH10: 4974 case WM_T_PCH: 4975 case WM_T_PCH2: 4976 case WM_T_PCH_LPT: 4977 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { 4978 reg = CSR_READ(sc, WMREG_STATUS); 4979 if ((reg & STATUS_LAN_INIT_DONE) != 0) 4980 break; 4981 delay(100); 4982 } 4983 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { 4984 log(LOG_ERR, "%s: %s: lan_init_done failed to " 4985 "complete\n", device_xname(sc->sc_dev), __func__); 4986 } 4987 break; 4988 default: 4989 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 4990 __func__); 4991 break; 4992 } 4993 4994 reg &= ~STATUS_LAN_INIT_DONE; 4995 CSR_WRITE(sc, WMREG_STATUS, reg); 4996 } 4997 4998 void 4999 wm_get_cfg_done(struct wm_softc *sc) 5000 { 5001 int mask; 5002 uint32_t reg; 5003 int i; 5004 5005 /* wait for eeprom to reload */ 5006 switch (sc->sc_type) { 5007 case WM_T_82542_2_0: 5008 case WM_T_82542_2_1: 5009 /* null */ 5010 break; 5011 case WM_T_82543: 5012 case WM_T_82544: 5013 case WM_T_82540: 5014 case WM_T_82545: 5015 case WM_T_82545_3: 5016 case WM_T_82546: 5017 case WM_T_82546_3: 5018 case WM_T_82541: 5019 case WM_T_82541_2: 5020 case WM_T_82547: 5021 case WM_T_82547_2: 5022 case WM_T_82573: 5023 case WM_T_82574: 5024 case WM_T_82583: 5025 /* generic */ 5026 delay(10*1000); 5027 break; 5028 case WM_T_80003: 5029 case WM_T_82571: 5030 case WM_T_82572: 5031 case WM_T_82575: 5032 case WM_T_82576: 5033 case WM_T_82580: 5034 case WM_T_82580ER: 5035 case WM_T_I350: 5036 case WM_T_I210: 5037 case WM_T_I211: 5038 if (sc->sc_type == WM_T_82571) { 5039 /* Only 82571 shares port 0 */ 5040 mask = EEMNGCTL_CFGDONE_0; 5041 } else 5042 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid; 5043 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) { 5044 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask) 5045 break; 5046 delay(1000); 5047 } 5048 if (i >= WM_PHY_CFG_TIMEOUT) { 5049 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n", 5050 device_xname(sc->sc_dev), __func__)); 5051 } 5052 break; 5053 case WM_T_ICH8: 5054 case WM_T_ICH9: 5055 case WM_T_ICH10: 5056 case WM_T_PCH: 5057 case WM_T_PCH2: 5058 case WM_T_PCH_LPT: 5059 delay(10*1000); 5060 if (sc->sc_type >= WM_T_ICH10) 5061 wm_lan_init_done(sc); 5062 else 5063 wm_get_auto_rd_done(sc); 5064 5065 reg = CSR_READ(sc, WMREG_STATUS); 5066 if ((reg & STATUS_PHYRA) != 0) 5067 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA); 5068 break; 5069 default: 5070 panic("%s: %s: unknown 
type\n", device_xname(sc->sc_dev), 5071 __func__); 5072 break; 5073 } 5074 } 5075 5076 /* 5077 * wm_acquire_eeprom: 5078 * 5079 * Perform the EEPROM handshake required on some chips. 5080 */ 5081 static int 5082 wm_acquire_eeprom(struct wm_softc *sc) 5083 { 5084 uint32_t reg; 5085 int x; 5086 int ret = 0; 5087 5088 /* always success */ 5089 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 5090 return 0; 5091 5092 if (sc->sc_flags & WM_F_SWFWHW_SYNC) { 5093 ret = wm_get_swfwhw_semaphore(sc); 5094 } else if (sc->sc_flags & WM_F_SWFW_SYNC) { 5095 /* this will also do wm_get_swsm_semaphore() if needed */ 5096 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 5097 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 5098 ret = wm_get_swsm_semaphore(sc); 5099 } 5100 5101 if (ret) { 5102 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 5103 __func__); 5104 return 1; 5105 } 5106 5107 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 5108 reg = CSR_READ(sc, WMREG_EECD); 5109 5110 /* Request EEPROM access. */ 5111 reg |= EECD_EE_REQ; 5112 CSR_WRITE(sc, WMREG_EECD, reg); 5113 5114 /* ..and wait for it to be granted. */ 5115 for (x = 0; x < 1000; x++) { 5116 reg = CSR_READ(sc, WMREG_EECD); 5117 if (reg & EECD_EE_GNT) 5118 break; 5119 delay(5); 5120 } 5121 if ((reg & EECD_EE_GNT) == 0) { 5122 aprint_error_dev(sc->sc_dev, 5123 "could not acquire EEPROM GNT\n"); 5124 reg &= ~EECD_EE_REQ; 5125 CSR_WRITE(sc, WMREG_EECD, reg); 5126 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 5127 wm_put_swfwhw_semaphore(sc); 5128 if (sc->sc_flags & WM_F_SWFW_SYNC) 5129 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 5130 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5131 wm_put_swsm_semaphore(sc); 5132 return 1; 5133 } 5134 } 5135 5136 return 0; 5137 } 5138 5139 /* 5140 * wm_release_eeprom: 5141 * 5142 * Release the EEPROM mutex. 5143 */ 5144 static void 5145 wm_release_eeprom(struct wm_softc *sc) 5146 { 5147 uint32_t reg; 5148 5149 /* always success */ 5150 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 5151 return; 5152 5153 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 5154 reg = CSR_READ(sc, WMREG_EECD); 5155 reg &= ~EECD_EE_REQ; 5156 CSR_WRITE(sc, WMREG_EECD, reg); 5157 } 5158 5159 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 5160 wm_put_swfwhw_semaphore(sc); 5161 if (sc->sc_flags & WM_F_SWFW_SYNC) 5162 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 5163 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5164 wm_put_swsm_semaphore(sc); 5165 } 5166 5167 /* 5168 * wm_eeprom_sendbits: 5169 * 5170 * Send a series of bits to the EEPROM. 5171 */ 5172 static void 5173 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 5174 { 5175 uint32_t reg; 5176 int x; 5177 5178 reg = CSR_READ(sc, WMREG_EECD); 5179 5180 for (x = nbits; x > 0; x--) { 5181 if (bits & (1U << (x - 1))) 5182 reg |= EECD_DI; 5183 else 5184 reg &= ~EECD_DI; 5185 CSR_WRITE(sc, WMREG_EECD, reg); 5186 delay(2); 5187 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 5188 delay(2); 5189 CSR_WRITE(sc, WMREG_EECD, reg); 5190 delay(2); 5191 } 5192 } 5193 5194 /* 5195 * wm_eeprom_recvbits: 5196 * 5197 * Receive a series of bits from the EEPROM. 
5198 */ 5199 static void 5200 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 5201 { 5202 uint32_t reg, val; 5203 int x; 5204 5205 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 5206 5207 val = 0; 5208 for (x = nbits; x > 0; x--) { 5209 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 5210 delay(2); 5211 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 5212 val |= (1U << (x - 1)); 5213 CSR_WRITE(sc, WMREG_EECD, reg); 5214 delay(2); 5215 } 5216 *valp = val; 5217 } 5218 5219 /* 5220 * wm_read_eeprom_uwire: 5221 * 5222 * Read a word from the EEPROM using the MicroWire protocol. 5223 */ 5224 static int 5225 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5226 { 5227 uint32_t reg, val; 5228 int i; 5229 5230 for (i = 0; i < wordcnt; i++) { 5231 /* Clear SK and DI. */ 5232 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 5233 CSR_WRITE(sc, WMREG_EECD, reg); 5234 5235 /* 5236 * XXX: workaround for a bug in qemu-0.12.x and prior 5237 * and Xen. 5238 * 5239 * We use this workaround only for 82540 because qemu's 5240 * e1000 acts as an 82540. 5241 */ 5242 if (sc->sc_type == WM_T_82540) { 5243 reg |= EECD_SK; 5244 CSR_WRITE(sc, WMREG_EECD, reg); 5245 reg &= ~EECD_SK; 5246 CSR_WRITE(sc, WMREG_EECD, reg); 5247 delay(2); 5248 } 5249 /* XXX: end of workaround */ 5250 5251 /* Set CHIP SELECT. */ 5252 reg |= EECD_CS; 5253 CSR_WRITE(sc, WMREG_EECD, reg); 5254 delay(2); 5255 5256 /* Shift in the READ command. */ 5257 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 5258 5259 /* Shift in address. */ 5260 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits); 5261 5262 /* Shift out the data. */ 5263 wm_eeprom_recvbits(sc, &val, 16); 5264 data[i] = val & 0xffff; 5265 5266 /* Clear CHIP SELECT. */ 5267 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 5268 CSR_WRITE(sc, WMREG_EECD, reg); 5269 delay(2); 5270 } 5271 5272 return 0; 5273 } 5274 5275 /* 5276 * wm_spi_eeprom_ready: 5277 * 5278 * Wait for a SPI EEPROM to be ready for commands. 5279 */ 5280 static int 5281 wm_spi_eeprom_ready(struct wm_softc *sc) 5282 { 5283 uint32_t val; 5284 int usec; 5285 5286 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { 5287 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); 5288 wm_eeprom_recvbits(sc, &val, 8); 5289 if ((val & SPI_SR_RDY) == 0) 5290 break; 5291 } 5292 if (usec >= SPI_MAX_RETRIES) { 5293 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n"); 5294 return 1; 5295 } 5296 return 0; 5297 } 5298 5299 /* 5300 * wm_read_eeprom_spi: 5301 * 5302 * Read a word from the EEPROM using the SPI protocol. 5303 */ 5304 static int 5305 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5306 { 5307 uint32_t reg, val; 5308 int i; 5309 uint8_t opc; 5310 5311 /* Clear SK and CS. */ 5312 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); 5313 CSR_WRITE(sc, WMREG_EECD, reg); 5314 delay(2); 5315 5316 if (wm_spi_eeprom_ready(sc)) 5317 return 1; 5318 5319 /* Toggle CS to flush commands. */ 5320 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); 5321 delay(2); 5322 CSR_WRITE(sc, WMREG_EECD, reg); 5323 delay(2); 5324 5325 opc = SPI_OPC_READ; 5326 if (sc->sc_ee_addrbits == 8 && word >= 128) 5327 opc |= SPI_OPC_A8; 5328 5329 wm_eeprom_sendbits(sc, opc, 8); 5330 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits); 5331 5332 for (i = 0; i < wordcnt; i++) { 5333 wm_eeprom_recvbits(sc, &val, 16); 5334 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); 5335 } 5336 5337 /* Raise CS and clear SK.
*/ 5338 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 5339 CSR_WRITE(sc, WMREG_EECD, reg); 5340 delay(2); 5341 5342 return 0; 5343 } 5344 5345 #define NVM_CHECKSUM 0xBABA 5346 #define EEPROM_SIZE 0x0040 5347 #define NVM_COMPAT 0x0003 5348 #define NVM_COMPAT_VALID_CHECKSUM 0x0001 5349 #define NVM_FUTURE_INIT_WORD1 0x0019 5350 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM 0x0040 5351 5352 /* 5353 * wm_validate_eeprom_checksum 5354 * 5355 * The checksum is defined as the sum of the first 64 (16 bit) words. 5356 */ 5357 static int 5358 wm_validate_eeprom_checksum(struct wm_softc *sc) 5359 { 5360 uint16_t checksum, valid_checksum; 5361 uint16_t eeprom_data; 5362 uint16_t csum_wordaddr; 5363 int i; 5364 5365 checksum = 0; 5366 5367 /* Don't check for I211 */ 5368 if (sc->sc_type == WM_T_I211) 5369 return 0; 5370 5371 if (sc->sc_type == WM_T_PCH_LPT) { 5372 csum_wordaddr = NVM_COMPAT; 5373 valid_checksum = NVM_COMPAT_VALID_CHECKSUM; 5374 } else { 5375 csum_wordaddr = NVM_FUTURE_INIT_WORD1; 5376 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM; 5377 } 5378 5379 #ifdef WM_DEBUG 5380 /* Dump EEPROM image for debug */ 5381 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5382 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5383 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 5384 wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data); 5385 if ((eeprom_data & valid_checksum) == 0) { 5386 DPRINTF(WM_DEBUG_NVM, 5387 ("%s: NVM need to be updated (%04x != %04x)\n", 5388 device_xname(sc->sc_dev), eeprom_data, 5389 valid_checksum)); 5390 } 5391 } 5392 5393 if ((wm_debug & WM_DEBUG_NVM) != 0) { 5394 printf("%s: NVM dump:\n", device_xname(sc->sc_dev)); 5395 for (i = 0; i < EEPROM_SIZE; i++) { 5396 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 5397 printf("XX "); 5398 else 5399 printf("%04x ", eeprom_data); 5400 if (i % 8 == 7) 5401 printf("\n"); 5402 } 5403 } 5404 5405 #endif /* WM_DEBUG */ 5406 5407 for (i = 0; i < EEPROM_SIZE; i++) { 5408 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 5409 return 1; 5410 checksum += eeprom_data; 5411 } 5412 5413 if (checksum != (uint16_t) NVM_CHECKSUM) { 5414 #ifdef WM_DEBUG 5415 printf("%s: NVM checksum mismatch (%04x != %04x)\n", 5416 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM); 5417 #endif 5418 } 5419 5420 return 0; 5421 } 5422 5423 /* 5424 * wm_read_eeprom: 5425 * 5426 * Read data from the serial EEPROM. 
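 * Dispatches on chip type and flags: the ICH8 flash method, the EERD
 * register method, SPI, or MicroWire.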
5427 */ 5428 static int 5429 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 5430 { 5431 int rv; 5432 5433 if (sc->sc_flags & WM_F_EEPROM_INVALID) 5434 return 1; 5435 5436 if (wm_acquire_eeprom(sc)) 5437 return 1; 5438 5439 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5440 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5441 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 5442 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data); 5443 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 5444 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data); 5445 else if (sc->sc_flags & WM_F_EEPROM_SPI) 5446 rv = wm_read_eeprom_spi(sc, word, wordcnt, data); 5447 else 5448 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data); 5449 5450 wm_release_eeprom(sc); 5451 return rv; 5452 } 5453 5454 static int 5455 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt, 5456 uint16_t *data) 5457 { 5458 int i, eerd = 0; 5459 int error = 0; 5460 5461 for (i = 0; i < wordcnt; i++) { 5462 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 5463 5464 CSR_WRITE(sc, WMREG_EERD, eerd); 5465 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 5466 if (error != 0) 5467 break; 5468 5469 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 5470 } 5471 5472 return error; 5473 } 5474 5475 static int 5476 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 5477 { 5478 uint32_t attempts = 100000; 5479 uint32_t i, reg = 0; 5480 int32_t done = -1; 5481 5482 for (i = 0; i < attempts; i++) { 5483 reg = CSR_READ(sc, rw); 5484 5485 if (reg & EERD_DONE) { 5486 done = 0; 5487 break; 5488 } 5489 delay(5); 5490 } 5491 5492 return done; 5493 } 5494 5495 static int 5496 wm_check_alt_mac_addr(struct wm_softc *sc) 5497 { 5498 uint16_t myea[ETHER_ADDR_LEN / 2]; 5499 uint16_t offset = EEPROM_OFF_MACADDR; 5500 5501 /* Try to read alternative MAC address pointer */ 5502 if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0) 5503 return -1; 5504 5505 /* Check pointer */ 5506 if (offset == 0xffff) 5507 return -1; 5508 5509 /* 5510 * Check whether the alternative MAC address is valid. 5511 * Some cards have a non-0xffff pointer but don't actually 5512 * use an alternative MAC address. 5513 * 5514 * Check the multicast (group) bit: a valid unicast 5515 * address must have it clear. 5516 */ 5517 if (wm_read_eeprom(sc, offset, 1, myea) == 0) 5518 if (((myea[0] & 0xff) & 0x01) == 0) 5519 return 0; /* found!
*/ 5519 5520 /* not found */ 5521 return -1; 5522 } 5523 5524 static int 5525 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr) 5526 { 5527 uint16_t myea[ETHER_ADDR_LEN / 2]; 5528 uint16_t offset = EEPROM_OFF_MACADDR; 5529 int do_invert = 0; 5530 5531 switch (sc->sc_type) { 5532 case WM_T_82580: 5533 case WM_T_82580ER: 5534 case WM_T_I350: 5535 switch (sc->sc_funcid) { 5536 case 0: 5537 /* default value (== EEPROM_OFF_MACADDR) */ 5538 break; 5539 case 1: 5540 offset = EEPROM_OFF_LAN1; 5541 break; 5542 case 2: 5543 offset = EEPROM_OFF_LAN2; 5544 break; 5545 case 3: 5546 offset = EEPROM_OFF_LAN3; 5547 break; 5548 default: 5549 goto bad; 5550 /* NOTREACHED */ 5551 break; 5552 } 5553 break; 5554 case WM_T_82571: 5555 case WM_T_82575: 5556 case WM_T_82576: 5557 case WM_T_80003: 5558 case WM_T_I210: 5559 case WM_T_I211: 5560 if (wm_check_alt_mac_addr(sc) != 0) { 5561 /* reset the offset to LAN0 */ 5562 offset = EEPROM_OFF_MACADDR; 5563 if ((sc->sc_funcid & 0x01) == 1) 5564 do_invert = 1; 5565 goto do_read; 5566 } 5567 switch (sc->sc_funcid) { 5568 case 0: 5569 /* 5570 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR 5571 * itself. 5572 */ 5573 break; 5574 case 1: 5575 offset += EEPROM_OFF_MACADDR_LAN1; 5576 break; 5577 case 2: 5578 offset += EEPROM_OFF_MACADDR_LAN2; 5579 break; 5580 case 3: 5581 offset += EEPROM_OFF_MACADDR_LAN3; 5582 break; 5583 default: 5584 goto bad; 5585 /* NOTREACHED */ 5586 break; 5587 } 5588 break; 5589 default: 5590 if ((sc->sc_funcid & 0x01) == 1) 5591 do_invert = 1; 5592 break; 5593 } 5594 5595 do_read: 5596 if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]), 5597 myea) != 0) { 5598 goto bad; 5599 } 5600 5601 enaddr[0] = myea[0] & 0xff; 5602 enaddr[1] = myea[0] >> 8; 5603 enaddr[2] = myea[1] & 0xff; 5604 enaddr[3] = myea[1] >> 8; 5605 enaddr[4] = myea[2] & 0xff; 5606 enaddr[5] = myea[2] >> 8; 5607 5608 /* 5609 * Toggle the LSB of the MAC address on the second port 5610 * of some dual port cards. 5611 */ 5612 if (do_invert != 0) 5613 enaddr[5] ^= 1; 5614 5615 return 0; 5616 5617 bad: 5618 aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n"); 5619 5620 return -1; 5621 } 5622 5623 /* 5624 * wm_add_rxbuf: 5625 * 5626 * Add a receive buffer to the indicated descriptor. 5627 */ 5628 static int 5629 wm_add_rxbuf(struct wm_softc *sc, int idx) 5630 { 5631 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx]; 5632 struct mbuf *m; 5633 int error; 5634 5635 MGETHDR(m, M_DONTWAIT, MT_DATA); 5636 if (m == NULL) 5637 return ENOBUFS; 5638 5639 MCLGET(m, M_DONTWAIT); 5640 if ((m->m_flags & M_EXT) == 0) { 5641 m_freem(m); 5642 return ENOBUFS; 5643 } 5644 5645 if (rxs->rxs_mbuf != NULL) 5646 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 5647 5648 rxs->rxs_mbuf = m; 5649 5650 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 5651 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, 5652 BUS_DMA_READ|BUS_DMA_NOWAIT); 5653 if (error) { 5654 /* XXX XXX XXX */ 5655 aprint_error_dev(sc->sc_dev, 5656 "unable to load rx DMA map %d, error = %d\n", 5657 idx, error); 5658 panic("wm_add_rxbuf"); 5659 } 5660 5661 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 5662 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 5663 5664 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 5665 if ((sc->sc_rctl & RCTL_EN) != 0) 5666 WM_INIT_RXDESC(sc, idx); 5667 } else 5668 WM_INIT_RXDESC(sc, idx); 5669 5670 return 0; 5671 } 5672 5673 /* 5674 * wm_set_ral: 5675 * 5676 * Set an entry in the receive address list.
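 * An entry is a station address packed into RAL_LO/RAL_HI plus the
 * RAL_AV (address valid) bit; passing a NULL enaddr clears the slot.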
5677 */ 5678 static void 5679 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 5680 { 5681 uint32_t ral_lo, ral_hi; 5682 5683 if (enaddr != NULL) { 5684 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 5685 (enaddr[3] << 24); 5686 ral_hi = enaddr[4] | (enaddr[5] << 8); 5687 ral_hi |= RAL_AV; 5688 } else { 5689 ral_lo = 0; 5690 ral_hi = 0; 5691 } 5692 5693 if (sc->sc_type >= WM_T_82544) { 5694 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 5695 ral_lo); 5696 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 5697 ral_hi); 5698 } else { 5699 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 5700 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 5701 } 5702 } 5703 5704 /* 5705 * wm_mchash: 5706 * 5707 * Compute the hash of the multicast address for the 4096-bit 5708 * multicast filter. 5709 */ 5710 static uint32_t 5711 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 5712 { 5713 static const int lo_shift[4] = { 4, 3, 2, 0 }; 5714 static const int hi_shift[4] = { 4, 5, 6, 8 }; 5715 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 5716 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 5717 uint32_t hash; 5718 5719 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5720 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5721 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 5722 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 5723 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 5724 return (hash & 0x3ff); 5725 } 5726 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 5727 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 5728 5729 return (hash & 0xfff); 5730 } 5731 5732 /* 5733 * wm_set_filter: 5734 * 5735 * Set up the receive filter. 5736 */ 5737 static void 5738 wm_set_filter(struct wm_softc *sc) 5739 { 5740 struct ethercom *ec = &sc->sc_ethercom; 5741 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 5742 struct ether_multi *enm; 5743 struct ether_multistep step; 5744 bus_addr_t mta_reg; 5745 uint32_t hash, reg, bit; 5746 int i, size; 5747 5748 if (sc->sc_type >= WM_T_82544) 5749 mta_reg = WMREG_CORDOVA_MTA; 5750 else 5751 mta_reg = WMREG_MTA; 5752 5753 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 5754 5755 if (ifp->if_flags & IFF_BROADCAST) 5756 sc->sc_rctl |= RCTL_BAM; 5757 if (ifp->if_flags & IFF_PROMISC) { 5758 sc->sc_rctl |= RCTL_UPE; 5759 goto allmulti; 5760 } 5761 5762 /* 5763 * Set the station address in the first RAL slot, and 5764 * clear the remaining slots. 5765 */ 5766 if (sc->sc_type == WM_T_ICH8) 5767 size = WM_RAL_TABSIZE_ICH8 -1; 5768 else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10) 5769 || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 5770 || (sc->sc_type == WM_T_PCH_LPT)) 5771 size = WM_RAL_TABSIZE_ICH8; 5772 else if (sc->sc_type == WM_T_82575) 5773 size = WM_RAL_TABSIZE_82575; 5774 else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580)) 5775 size = WM_RAL_TABSIZE_82576; 5776 else if (sc->sc_type == WM_T_I350) 5777 size = WM_RAL_TABSIZE_I350; 5778 else 5779 size = WM_RAL_TABSIZE; 5780 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); 5781 for (i = 1; i < size; i++) 5782 wm_set_ral(sc, NULL, i); 5783 5784 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5785 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5786 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) 5787 size = WM_ICH8_MC_TABSIZE; 5788 else 5789 size = WM_MC_TABSIZE; 5790 /* Clear out the multicast table. 
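 * Each 32-bit MTA word holds 32 filter bits; the loop below that sets
 * the filter selects word (hash >> 5) and bit (hash & 0x1f) from the
 * value computed by wm_mchash().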
*/ 5791 for (i = 0; i < size; i++) 5792 CSR_WRITE(sc, mta_reg + (i << 2), 0); 5793 5794 ETHER_FIRST_MULTI(step, ec, enm); 5795 while (enm != NULL) { 5796 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 5797 /* 5798 * We must listen to a range of multicast addresses. 5799 * For now, just accept all multicasts, rather than 5800 * trying to set only those filter bits needed to match 5801 * the range. (At this time, the only use of address 5802 * ranges is for IP multicast routing, for which the 5803 * range is big enough to require all bits set.) 5804 */ 5805 goto allmulti; 5806 } 5807 5808 hash = wm_mchash(sc, enm->enm_addrlo); 5809 5810 reg = (hash >> 5); 5811 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5812 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5813 || (sc->sc_type == WM_T_PCH2) 5814 || (sc->sc_type == WM_T_PCH_LPT)) 5815 reg &= 0x1f; 5816 else 5817 reg &= 0x7f; 5818 bit = hash & 0x1f; 5819 5820 hash = CSR_READ(sc, mta_reg + (reg << 2)); 5821 hash |= 1U << bit; 5822 5823 /* XXX Hardware bug?? */ 5824 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) { 5825 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 5826 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 5827 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 5828 } else 5829 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 5830 5831 ETHER_NEXT_MULTI(step, enm); 5832 } 5833 5834 ifp->if_flags &= ~IFF_ALLMULTI; 5835 goto setit; 5836 5837 allmulti: 5838 ifp->if_flags |= IFF_ALLMULTI; 5839 sc->sc_rctl |= RCTL_MPE; 5840 5841 setit: 5842 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 5843 } 5844 5845 /* 5846 * wm_tbi_mediainit: 5847 * 5848 * Initialize media for use on 1000BASE-X devices. 5849 */ 5850 static void 5851 wm_tbi_mediainit(struct wm_softc *sc) 5852 { 5853 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 5854 const char *sep = ""; 5855 5856 if (sc->sc_type < WM_T_82543) 5857 sc->sc_tipg = TIPG_WM_DFLT; 5858 else 5859 sc->sc_tipg = TIPG_LG_DFLT; 5860 5861 sc->sc_tbi_anegticks = 5; 5862 5863 /* Initialize our media structures */ 5864 sc->sc_mii.mii_ifp = ifp; 5865 5866 sc->sc_ethercom.ec_mii = &sc->sc_mii; 5867 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange, 5868 wm_tbi_mediastatus); 5869 5870 /* 5871 * SWD Pins: 5872 * 5873 * 0 = Link LED (output) 5874 * 1 = Loss Of Signal (input) 5875 */ 5876 sc->sc_ctrl |= CTRL_SWDPIO(0); 5877 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 5878 5879 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5880 5881 #define ADD(ss, mm, dd) \ 5882 do { \ 5883 aprint_normal("%s%s", sep, ss); \ 5884 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \ 5885 sep = ", "; \ 5886 } while (/*CONSTCOND*/0) 5887 5888 aprint_normal_dev(sc->sc_dev, ""); 5889 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 5890 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD); 5891 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD); 5892 aprint_normal("\n"); 5893 5894 #undef ADD 5895 5896 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 5897 } 5898 5899 /* 5900 * wm_tbi_mediastatus: [ifmedia interface function] 5901 * 5902 * Get the current interface media status on a 1000BASE-X device. 
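*
* A sketch of the mapping done below: STATUS_LU selects IFM_ACTIVE
* versus IFM_NONE, STATUS_FD adds IFM_FDX, and the CTRL_RFCE/CTRL_TFCE
* bits are reported as the IFM_ETH_RXPAUSE/IFM_ETH_TXPAUSE
* flow-control flags.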
5903 */ 5904 static void 5905 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 5906 { 5907 struct wm_softc *sc = ifp->if_softc; 5908 uint32_t ctrl, status; 5909 5910 ifmr->ifm_status = IFM_AVALID; 5911 ifmr->ifm_active = IFM_ETHER; 5912 5913 status = CSR_READ(sc, WMREG_STATUS); 5914 if ((status & STATUS_LU) == 0) { 5915 ifmr->ifm_active |= IFM_NONE; 5916 return; 5917 } 5918 5919 ifmr->ifm_status |= IFM_ACTIVE; 5920 ifmr->ifm_active |= IFM_1000_SX; 5921 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 5922 ifmr->ifm_active |= IFM_FDX; 5923 ctrl = CSR_READ(sc, WMREG_CTRL); 5924 if (ctrl & CTRL_RFCE) 5925 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 5926 if (ctrl & CTRL_TFCE) 5927 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 5928 } 5929 5930 /* 5931 * wm_tbi_mediachange: [ifmedia interface function] 5932 * 5933 * Set hardware to newly-selected media on a 1000BASE-X device. 5934 */ 5935 static int 5936 wm_tbi_mediachange(struct ifnet *ifp) 5937 { 5938 struct wm_softc *sc = ifp->if_softc; 5939 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 5940 uint32_t status; 5941 int i; 5942 5943 sc->sc_txcw = 0; 5944 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO || 5945 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 5946 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE; 5947 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 5948 sc->sc_txcw |= TXCW_ANE; 5949 } else { 5950 /* 5951 * If autonegotiation is turned off, force link up and turn on 5952 * full duplex 5953 */ 5954 sc->sc_txcw &= ~TXCW_ANE; 5955 sc->sc_ctrl |= CTRL_SLU | CTRL_FD; 5956 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 5957 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5958 delay(1000); 5959 } 5960 5961 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n", 5962 device_xname(sc->sc_dev),sc->sc_txcw)); 5963 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 5964 delay(10000); 5965 5966 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1); 5967 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i)); 5968 5969 /* 5970 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the 5971 * optics detect a signal, 0 if they don't. 5972 */ 5973 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) { 5974 /* Have signal; wait for the link to come up. */ 5975 5976 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 5977 /* 5978 * Reset the link, and let autonegotiation do its thing 5979 */ 5980 sc->sc_ctrl |= CTRL_LRST; 5981 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5982 delay(1000); 5983 sc->sc_ctrl &= ~CTRL_LRST; 5984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5985 delay(1000); 5986 } 5987 5988 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) { 5989 delay(10000); 5990 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 5991 break; 5992 } 5993 5994 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n", 5995 device_xname(sc->sc_dev),i)); 5996 5997 status = CSR_READ(sc, WMREG_STATUS); 5998 DPRINTF(WM_DEBUG_LINK, 5999 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n", 6000 device_xname(sc->sc_dev),status, STATUS_LU)); 6001 if (status & STATUS_LU) { 6002 /* Link is up. */ 6003 DPRINTF(WM_DEBUG_LINK, 6004 ("%s: LINK: set media -> link up %s\n", 6005 device_xname(sc->sc_dev), 6006 (status & STATUS_FD) ? 
"FDX" : "HDX")); 6007 6008 /* 6009 * NOTE: CTRL will update TFCE and RFCE automatically, 6010 * so we should update sc->sc_ctrl 6011 */ 6012 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 6013 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 6014 sc->sc_fcrtl &= ~FCRTL_XONE; 6015 if (status & STATUS_FD) 6016 sc->sc_tctl |= 6017 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 6018 else 6019 sc->sc_tctl |= 6020 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 6021 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 6022 sc->sc_fcrtl |= FCRTL_XONE; 6023 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 6024 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 6025 WMREG_OLD_FCRTL : WMREG_FCRTL, 6026 sc->sc_fcrtl); 6027 sc->sc_tbi_linkup = 1; 6028 } else { 6029 if (i == WM_LINKUP_TIMEOUT) 6030 wm_check_for_link(sc); 6031 /* Link is down. */ 6032 DPRINTF(WM_DEBUG_LINK, 6033 ("%s: LINK: set media -> link down\n", 6034 device_xname(sc->sc_dev))); 6035 sc->sc_tbi_linkup = 0; 6036 } 6037 } else { 6038 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 6039 device_xname(sc->sc_dev))); 6040 sc->sc_tbi_linkup = 0; 6041 } 6042 6043 wm_tbi_set_linkled(sc); 6044 6045 return 0; 6046 } 6047 6048 /* 6049 * wm_tbi_set_linkled: 6050 * 6051 * Update the link LED on 1000BASE-X devices. 6052 */ 6053 static void 6054 wm_tbi_set_linkled(struct wm_softc *sc) 6055 { 6056 6057 if (sc->sc_tbi_linkup) 6058 sc->sc_ctrl |= CTRL_SWDPIN(0); 6059 else 6060 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 6061 6062 /* 82540 or newer devices are active low */ 6063 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; 6064 6065 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6066 } 6067 6068 /* 6069 * wm_tbi_check_link: 6070 * 6071 * Check the link on 1000BASE-X devices. 6072 */ 6073 static void 6074 wm_tbi_check_link(struct wm_softc *sc) 6075 { 6076 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 6077 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 6078 uint32_t rxcw, ctrl, status; 6079 6080 status = CSR_READ(sc, WMREG_STATUS); 6081 6082 rxcw = CSR_READ(sc, WMREG_RXCW); 6083 ctrl = CSR_READ(sc, WMREG_CTRL); 6084 6085 /* set link status */ 6086 if ((status & STATUS_LU) == 0) { 6087 DPRINTF(WM_DEBUG_LINK, 6088 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev))); 6089 sc->sc_tbi_linkup = 0; 6090 } else if (sc->sc_tbi_linkup == 0) { 6091 DPRINTF(WM_DEBUG_LINK, 6092 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev), 6093 (status & STATUS_FD) ? "FDX" : "HDX")); 6094 sc->sc_tbi_linkup = 1; 6095 } 6096 6097 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) 6098 && ((status & STATUS_LU) == 0)) { 6099 sc->sc_tbi_linkup = 0; 6100 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) { 6101 /* RXCFG storm! */ 6102 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n", 6103 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg)); 6104 wm_init(ifp); 6105 ifp->if_start(ifp); 6106 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 6107 /* If the timer expired, retry autonegotiation */ 6108 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) { 6109 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n")); 6110 sc->sc_tbi_ticks = 0; 6111 /* 6112 * Reset the link, and let autonegotiation do 6113 * its thing 6114 */ 6115 sc->sc_ctrl |= CTRL_LRST; 6116 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6117 delay(1000); 6118 sc->sc_ctrl &= ~CTRL_LRST; 6119 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6120 delay(1000); 6121 CSR_WRITE(sc, WMREG_TXCW, 6122 sc->sc_txcw & ~TXCW_ANE); 6123 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 6124 } 6125 } 6126 } 6127 6128 wm_tbi_set_linkled(sc); 6129 } 6130 6131 /* 6132 * wm_gmii_reset: 6133 * 6134 * Reset the PHY. 
6135 */ 6136 static void 6137 wm_gmii_reset(struct wm_softc *sc) 6138 { 6139 uint32_t reg; 6140 int rv; 6141 6142 /* get phy semaphore */ 6143 switch (sc->sc_type) { 6144 case WM_T_82571: 6145 case WM_T_82572: 6146 case WM_T_82573: 6147 case WM_T_82574: 6148 case WM_T_82583: 6149 /* XXX should get sw semaphore, too */ 6150 rv = wm_get_swsm_semaphore(sc); 6151 break; 6152 case WM_T_82575: 6153 case WM_T_82576: 6154 case WM_T_82580: 6155 case WM_T_82580ER: 6156 case WM_T_I350: 6157 case WM_T_I210: 6158 case WM_T_I211: 6159 case WM_T_80003: 6160 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 6161 break; 6162 case WM_T_ICH8: 6163 case WM_T_ICH9: 6164 case WM_T_ICH10: 6165 case WM_T_PCH: 6166 case WM_T_PCH2: 6167 case WM_T_PCH_LPT: 6168 rv = wm_get_swfwhw_semaphore(sc); 6169 break; 6170 default: 6171 /* nothing to do*/ 6172 rv = 0; 6173 break; 6174 } 6175 if (rv != 0) { 6176 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6177 __func__); 6178 return; 6179 } 6180 6181 switch (sc->sc_type) { 6182 case WM_T_82542_2_0: 6183 case WM_T_82542_2_1: 6184 /* null */ 6185 break; 6186 case WM_T_82543: 6187 /* 6188 * With 82543, we need to force speed and duplex on the MAC 6189 * equal to what the PHY speed and duplex configuration is. 6190 * In addition, we need to perform a hardware reset on the PHY 6191 * to take it out of reset. 6192 */ 6193 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 6194 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6195 6196 /* The PHY reset pin is active-low. */ 6197 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6198 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 6199 CTRL_EXT_SWDPIN(4)); 6200 reg |= CTRL_EXT_SWDPIO(4); 6201 6202 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6203 delay(10*1000); 6204 6205 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 6206 delay(150); 6207 #if 0 6208 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 6209 #endif 6210 delay(20*1000); /* XXX extra delay to get PHY ID? 
*/
6211 break;
6212 case WM_T_82544: /* reset 10000us */
6213 case WM_T_82540:
6214 case WM_T_82545:
6215 case WM_T_82545_3:
6216 case WM_T_82546:
6217 case WM_T_82546_3:
6218 case WM_T_82541:
6219 case WM_T_82541_2:
6220 case WM_T_82547:
6221 case WM_T_82547_2:
6222 case WM_T_82571: /* reset 100us */
6223 case WM_T_82572:
6224 case WM_T_82573:
6225 case WM_T_82574:
6226 case WM_T_82575:
6227 case WM_T_82576:
6228 case WM_T_82580:
6229 case WM_T_82580ER:
6230 case WM_T_I350:
6231 case WM_T_I210:
6232 case WM_T_I211:
6233 case WM_T_82583:
6234 case WM_T_80003:
6235 /* generic reset */
6236 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6237 delay(20000);
6238 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6239 delay(20000);
6240
6241 if ((sc->sc_type == WM_T_82541)
6242 || (sc->sc_type == WM_T_82541_2)
6243 || (sc->sc_type == WM_T_82547)
6244 || (sc->sc_type == WM_T_82547_2)) {
6245 /* workarounds for igp are done in igp_reset() */
6246 /* XXX add code to set LED after phy reset */
6247 }
6248 break;
6249 case WM_T_ICH8:
6250 case WM_T_ICH9:
6251 case WM_T_ICH10:
6252 case WM_T_PCH:
6253 case WM_T_PCH2:
6254 case WM_T_PCH_LPT:
6255 /* generic reset */
6256 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6257 delay(100);
6258 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6259 delay(150);
6260 break;
6261 default:
6262 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6263 __func__);
6264 break;
6265 }
6266
6267 /* release PHY semaphore */
6268 switch (sc->sc_type) {
6269 case WM_T_82571:
6270 case WM_T_82572:
6271 case WM_T_82573:
6272 case WM_T_82574:
6273 case WM_T_82583:
6274 /* XXX should put sw semaphore, too */
6275 wm_put_swsm_semaphore(sc);
6276 break;
6277 case WM_T_82575:
6278 case WM_T_82576:
6279 case WM_T_82580:
6280 case WM_T_82580ER:
6281 case WM_T_I350:
6282 case WM_T_I210:
6283 case WM_T_I211:
6284 case WM_T_80003:
6285 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6286 break;
6287 case WM_T_ICH8:
6288 case WM_T_ICH9:
6289 case WM_T_ICH10:
6290 case WM_T_PCH:
6291 case WM_T_PCH2:
6292 case WM_T_PCH_LPT:
6293 wm_put_swfwhw_semaphore(sc);
6294 break;
6295 default:
6296 /* nothing to do */
6297 rv = 0;
6298 break;
6299 }
6300
6301 /* get_cfg_done */
6302 wm_get_cfg_done(sc);
6303
6304 /* extra setup */
6305 switch (sc->sc_type) {
6306 case WM_T_82542_2_0:
6307 case WM_T_82542_2_1:
6308 case WM_T_82543:
6309 case WM_T_82544:
6310 case WM_T_82540:
6311 case WM_T_82545:
6312 case WM_T_82545_3:
6313 case WM_T_82546:
6314 case WM_T_82546_3:
6315 case WM_T_82541_2:
6316 case WM_T_82547_2:
6317 case WM_T_82571:
6318 case WM_T_82572:
6319 case WM_T_82573:
6320 case WM_T_82574:
6321 case WM_T_82575:
6322 case WM_T_82576:
6323 case WM_T_82580:
6324 case WM_T_82580ER:
6325 case WM_T_I350:
6326 case WM_T_I210:
6327 case WM_T_I211:
6328 case WM_T_82583:
6329 case WM_T_80003:
6330 /* null */
6331 break;
6332 case WM_T_82541:
6333 case WM_T_82547:
6334 /* XXX Actively configure the LED after PHY reset */
6335 break;
6336 case WM_T_ICH8:
6337 case WM_T_ICH9:
6338 case WM_T_ICH10:
6339 case WM_T_PCH:
6340 case WM_T_PCH2:
6341 case WM_T_PCH_LPT:
6342 /* Allow time for h/w to get to a quiescent state after reset */
6343 delay(10*1000);
6344
6345 if (sc->sc_type == WM_T_PCH)
6346 wm_hv_phy_workaround_ich8lan(sc);
6347
6348 if (sc->sc_type == WM_T_PCH2)
6349 wm_lv_phy_workaround_ich8lan(sc);
6350
6351 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6352 /*
6353 * dummy read to clear the phy wakeup bit after lcd
6354 * reset
6355 */
6356 reg =
wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6357 }
6358
6359 /*
6360 * XXX Configure the LCD with the extended configuration region
6361 * in NVM
6362 */
6363
6364 /* Configure the LCD with the OEM bits in NVM */
6365 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6366 || (sc->sc_type == WM_T_PCH_LPT)) {
6367 /*
6368 * Disable LPLU.
6369 * XXX It seems that 82567 has LPLU, too.
6370 */
6371 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6372 reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6373 reg |= HV_OEM_BITS_ANEGNOW;
6374 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6375 }
6376 break;
6377 default:
6378 panic("%s: unknown type\n", __func__);
6379 break;
6380 }
6381 }
6382
6383 /*
6384 * wm_gmii_mediainit:
6385 *
6386 * Initialize media for use on 1000BASE-T devices.
6387 */
6388 static void
6389 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6390 {
6391 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6392 struct mii_data *mii = &sc->sc_mii;
6393
6394 /* We have MII. */
6395 sc->sc_flags |= WM_F_HAS_MII;
6396
6397 if (sc->sc_type == WM_T_80003)
6398 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6399 else
6400 sc->sc_tipg = TIPG_1000T_DFLT;
6401
6402 /*
6403 * Let the chip set speed/duplex on its own based on
6404 * signals from the PHY.
6405 * XXXbouyer - I'm not sure this is right for the 80003,
6406 * the em driver only sets CTRL_SLU here - but it seems to work.
6407 */
6408 sc->sc_ctrl |= CTRL_SLU;
6409 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6410
6411 /* Initialize our media structures and probe the GMII. */
6412 mii->mii_ifp = ifp;
6413
6414 /*
6415 * Determine the PHY access method.
6416 *
6417 * For SGMII, use the SGMII-specific method.
6418 *
6419 * For some devices, we can determine the PHY access method
6420 * from sc_type.
6421 *
6422 * For ICH8 variants, it's difficult to determine the PHY access
6423 * method by sc_type, so use the PCI product ID for some devices.
6424 * For other ICH8 variants, try to use igp's method. If the PHY
6425 * can't be detected, then use bm's method.
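*
* As a rough summary of the switch below: 82577/82578/82579 and
* I217/I218 use the hv read/write routines, 82567 uses the bm
* routines, SGMII devices go through the I2C-based routines, and
* everything else falls back on MDIC-style access (i82544 and
* later) or the bit-banged i82543 access.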
6426 */ 6427 switch (prodid) { 6428 case PCI_PRODUCT_INTEL_PCH_M_LM: 6429 case PCI_PRODUCT_INTEL_PCH_M_LC: 6430 /* 82577 */ 6431 sc->sc_phytype = WMPHY_82577; 6432 mii->mii_readreg = wm_gmii_hv_readreg; 6433 mii->mii_writereg = wm_gmii_hv_writereg; 6434 break; 6435 case PCI_PRODUCT_INTEL_PCH_D_DM: 6436 case PCI_PRODUCT_INTEL_PCH_D_DC: 6437 /* 82578 */ 6438 sc->sc_phytype = WMPHY_82578; 6439 mii->mii_readreg = wm_gmii_hv_readreg; 6440 mii->mii_writereg = wm_gmii_hv_writereg; 6441 break; 6442 case PCI_PRODUCT_INTEL_PCH2_LV_LM: 6443 case PCI_PRODUCT_INTEL_PCH2_LV_V: 6444 /* 82579 */ 6445 sc->sc_phytype = WMPHY_82579; 6446 mii->mii_readreg = wm_gmii_hv_readreg; 6447 mii->mii_writereg = wm_gmii_hv_writereg; 6448 break; 6449 case PCI_PRODUCT_INTEL_I217_LM: 6450 case PCI_PRODUCT_INTEL_I217_V: 6451 case PCI_PRODUCT_INTEL_I218_LM: 6452 case PCI_PRODUCT_INTEL_I218_V: 6453 /* I21[78] */ 6454 mii->mii_readreg = wm_gmii_hv_readreg; 6455 mii->mii_writereg = wm_gmii_hv_writereg; 6456 break; 6457 case PCI_PRODUCT_INTEL_82801I_BM: 6458 case PCI_PRODUCT_INTEL_82801J_R_BM_LM: 6459 case PCI_PRODUCT_INTEL_82801J_R_BM_LF: 6460 case PCI_PRODUCT_INTEL_82801J_D_BM_LM: 6461 case PCI_PRODUCT_INTEL_82801J_D_BM_LF: 6462 case PCI_PRODUCT_INTEL_82801J_R_BM_V: 6463 /* 82567 */ 6464 sc->sc_phytype = WMPHY_BM; 6465 mii->mii_readreg = wm_gmii_bm_readreg; 6466 mii->mii_writereg = wm_gmii_bm_writereg; 6467 break; 6468 default: 6469 if ((sc->sc_flags & WM_F_SGMII) != 0) { 6470 mii->mii_readreg = wm_sgmii_readreg; 6471 mii->mii_writereg = wm_sgmii_writereg; 6472 } else if (sc->sc_type >= WM_T_80003) { 6473 mii->mii_readreg = wm_gmii_i80003_readreg; 6474 mii->mii_writereg = wm_gmii_i80003_writereg; 6475 } else if (sc->sc_type >= WM_T_I210) { 6476 mii->mii_readreg = wm_gmii_i82544_readreg; 6477 mii->mii_writereg = wm_gmii_i82544_writereg; 6478 } else if (sc->sc_type >= WM_T_82580) { 6479 sc->sc_phytype = WMPHY_82580; 6480 mii->mii_readreg = wm_gmii_82580_readreg; 6481 mii->mii_writereg = wm_gmii_82580_writereg; 6482 } else if (sc->sc_type >= WM_T_82544) { 6483 mii->mii_readreg = wm_gmii_i82544_readreg; 6484 mii->mii_writereg = wm_gmii_i82544_writereg; 6485 } else { 6486 mii->mii_readreg = wm_gmii_i82543_readreg; 6487 mii->mii_writereg = wm_gmii_i82543_writereg; 6488 } 6489 break; 6490 } 6491 mii->mii_statchg = wm_gmii_statchg; 6492 6493 wm_gmii_reset(sc); 6494 6495 sc->sc_ethercom.ec_mii = &sc->sc_mii; 6496 ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, 6497 wm_gmii_mediastatus); 6498 6499 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 6500 || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER) 6501 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) 6502 || (sc->sc_type == WM_T_I211)) { 6503 if ((sc->sc_flags & WM_F_SGMII) == 0) { 6504 /* Attach only one port */ 6505 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, 6506 MII_OFFSET_ANY, MIIF_DOPAUSE); 6507 } else { 6508 int i; 6509 uint32_t ctrl_ext; 6510 6511 /* Power on sgmii phy if it is disabled */ 6512 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 6513 CSR_WRITE(sc, WMREG_CTRL_EXT, 6514 ctrl_ext &~ CTRL_EXT_SWDPIN(3)); 6515 CSR_WRITE_FLUSH(sc); 6516 delay(300*1000); /* XXX too long */ 6517 6518 /* from 1 to 8 */ 6519 for (i = 1; i < 8; i++) 6520 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 6521 i, MII_OFFSET_ANY, MIIF_DOPAUSE); 6522 6523 /* restore previous sfp cage power state */ 6524 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 6525 } 6526 } else { 6527 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 6528 
MII_OFFSET_ANY, MIIF_DOPAUSE);
6529 }
6530
6531 /*
6532 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6533 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6534 */
6535 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6536 (LIST_FIRST(&mii->mii_phys) == NULL)) {
6537 wm_set_mdio_slow_mode_hv(sc);
6538 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6539 MII_OFFSET_ANY, MIIF_DOPAUSE);
6540 }
6541
6542 /*
6543 * (For ICH8 variants)
6544 * If PHY detection failed, use BM's r/w function and retry.
6545 */
6546 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6547 /* if failed, retry with *_bm_* */
6548 mii->mii_readreg = wm_gmii_bm_readreg;
6549 mii->mii_writereg = wm_gmii_bm_writereg;
6550
6551 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6552 MII_OFFSET_ANY, MIIF_DOPAUSE);
6553 }
6554
6555 if (LIST_FIRST(&mii->mii_phys) == NULL) {
6556 /* No PHY was found */
6557 ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6558 ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6559 sc->sc_phytype = WMPHY_NONE;
6560 } else {
6561 /*
6562 * PHY found!
6563 * Check the PHY type.
6564 */
6565 uint32_t model;
6566 struct mii_softc *child;
6567
6568 child = LIST_FIRST(&mii->mii_phys);
6569 if (device_is_a(child->mii_dev, "igphy")) {
6570 struct igphy_softc *isc = (struct igphy_softc *)child;
6571
6572 model = isc->sc_mii.mii_mpd_model;
6573 if (model == MII_MODEL_yyINTEL_I82566)
6574 sc->sc_phytype = WMPHY_IGP_3;
6575 }
6576
6577 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6578 }
6579 }
6580
6581 /*
6582 * wm_gmii_mediastatus: [ifmedia interface function]
6583 *
6584 * Get the current interface media status on a 1000BASE-T device.
6585 */
6586 static void
6587 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6588 {
6589 struct wm_softc *sc = ifp->if_softc;
6590
6591 ether_mediastatus(ifp, ifmr);
6592 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6593 | sc->sc_flowflags;
6594 }
6595
6596 /*
6597 * wm_gmii_mediachange: [ifmedia interface function]
6598 *
6599 * Set hardware to newly-selected media on a 1000BASE-T device.
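*
* For auto-selected media (and on everything newer than the 82543)
* the MAC simply follows the PHY; otherwise the code below forces
* the CTRL speed and duplex bits to match the selected media word.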
6600 */ 6601 static int 6602 wm_gmii_mediachange(struct ifnet *ifp) 6603 { 6604 struct wm_softc *sc = ifp->if_softc; 6605 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 6606 int rc; 6607 6608 if ((ifp->if_flags & IFF_UP) == 0) 6609 return 0; 6610 6611 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 6612 sc->sc_ctrl |= CTRL_SLU; 6613 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 6614 || (sc->sc_type > WM_T_82543)) { 6615 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); 6616 } else { 6617 sc->sc_ctrl &= ~CTRL_ASDE; 6618 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 6619 if (ife->ifm_media & IFM_FDX) 6620 sc->sc_ctrl |= CTRL_FD; 6621 switch (IFM_SUBTYPE(ife->ifm_media)) { 6622 case IFM_10_T: 6623 sc->sc_ctrl |= CTRL_SPEED_10; 6624 break; 6625 case IFM_100_TX: 6626 sc->sc_ctrl |= CTRL_SPEED_100; 6627 break; 6628 case IFM_1000_T: 6629 sc->sc_ctrl |= CTRL_SPEED_1000; 6630 break; 6631 default: 6632 panic("wm_gmii_mediachange: bad media 0x%x", 6633 ife->ifm_media); 6634 } 6635 } 6636 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6637 if (sc->sc_type <= WM_T_82543) 6638 wm_gmii_reset(sc); 6639 6640 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 6641 return 0; 6642 return rc; 6643 } 6644 6645 #define MDI_IO CTRL_SWDPIN(2) 6646 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 6647 #define MDI_CLK CTRL_SWDPIN(3) 6648 6649 static void 6650 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 6651 { 6652 uint32_t i, v; 6653 6654 v = CSR_READ(sc, WMREG_CTRL); 6655 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 6656 v |= MDI_DIR | CTRL_SWDPIO(3); 6657 6658 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 6659 if (data & i) 6660 v |= MDI_IO; 6661 else 6662 v &= ~MDI_IO; 6663 CSR_WRITE(sc, WMREG_CTRL, v); 6664 delay(10); 6665 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6666 delay(10); 6667 CSR_WRITE(sc, WMREG_CTRL, v); 6668 delay(10); 6669 } 6670 } 6671 6672 static uint32_t 6673 i82543_mii_recvbits(struct wm_softc *sc) 6674 { 6675 uint32_t v, i, data = 0; 6676 6677 v = CSR_READ(sc, WMREG_CTRL); 6678 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 6679 v |= CTRL_SWDPIO(3); 6680 6681 CSR_WRITE(sc, WMREG_CTRL, v); 6682 delay(10); 6683 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6684 delay(10); 6685 CSR_WRITE(sc, WMREG_CTRL, v); 6686 delay(10); 6687 6688 for (i = 0; i < 16; i++) { 6689 data <<= 1; 6690 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6691 delay(10); 6692 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 6693 data |= 1; 6694 CSR_WRITE(sc, WMREG_CTRL, v); 6695 delay(10); 6696 } 6697 6698 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 6699 delay(10); 6700 CSR_WRITE(sc, WMREG_CTRL, v); 6701 delay(10); 6702 6703 return data; 6704 } 6705 6706 #undef MDI_IO 6707 #undef MDI_DIR 6708 #undef MDI_CLK 6709 6710 /* 6711 * wm_gmii_i82543_readreg: [mii interface function] 6712 * 6713 * Read a PHY register on the GMII (i82543 version). 6714 */ 6715 static int 6716 wm_gmii_i82543_readreg(device_t self, int phy, int reg) 6717 { 6718 struct wm_softc *sc = device_private(self); 6719 int rv; 6720 6721 i82543_mii_sendbits(sc, 0xffffffffU, 32); 6722 i82543_mii_sendbits(sc, reg | (phy << 5) | 6723 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 6724 rv = i82543_mii_recvbits(sc) & 0xffff; 6725 6726 DPRINTF(WM_DEBUG_GMII, 6727 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 6728 device_xname(sc->sc_dev), phy, reg, rv)); 6729 6730 return rv; 6731 } 6732 6733 /* 6734 * wm_gmii_i82543_writereg: [mii interface function] 6735 * 6736 * Write a PHY register on the GMII (i82543 version). 
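*
* The i82543 bit-bangs management frames on SWDPIN 2/3. A write is
* clocked out as a 32-bit preamble of ones followed by a 32-bit
* clause-22 frame: start code (01), opcode (01 = write), 5-bit PHY
* address, 5-bit register address, turnaround (10), and 16 data
* bits, exactly as assembled in the sendbits calls below.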
6737 */
6738 static void
6739 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6740 {
6741 struct wm_softc *sc = device_private(self);
6742
6743 i82543_mii_sendbits(sc, 0xffffffffU, 32);
6744 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6745 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6746 (MII_COMMAND_START << 30), 32);
6747 }
6748
6749 /*
6750 * wm_gmii_i82544_readreg: [mii interface function]
6751 *
6752 * Read a PHY register on the GMII.
6753 */
6754 static int
6755 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6756 {
6757 struct wm_softc *sc = device_private(self);
6758 uint32_t mdic = 0;
6759 int i, rv;
6760
6761 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6762 MDIC_REGADD(reg));
6763
6764 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6765 mdic = CSR_READ(sc, WMREG_MDIC);
6766 if (mdic & MDIC_READY)
6767 break;
6768 delay(50);
6769 }
6770
6771 if ((mdic & MDIC_READY) == 0) {
6772 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6773 device_xname(sc->sc_dev), phy, reg);
6774 rv = 0;
6775 } else if (mdic & MDIC_E) {
6776 #if 0 /* This is normal if no PHY is present. */
6777 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6778 device_xname(sc->sc_dev), phy, reg);
6779 #endif
6780 rv = 0;
6781 } else {
6782 rv = MDIC_DATA(mdic);
6783 if (rv == 0xffff)
6784 rv = 0;
6785 }
6786
6787 return rv;
6788 }
6789
6790 /*
6791 * wm_gmii_i82544_writereg: [mii interface function]
6792 *
6793 * Write a PHY register on the GMII.
6794 */
6795 static void
6796 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6797 {
6798 struct wm_softc *sc = device_private(self);
6799 uint32_t mdic = 0;
6800 int i;
6801
6802 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6803 MDIC_REGADD(reg) | MDIC_DATA(val));
6804
6805 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6806 mdic = CSR_READ(sc, WMREG_MDIC);
6807 if (mdic & MDIC_READY)
6808 break;
6809 delay(50);
6810 }
6811
6812 if ((mdic & MDIC_READY) == 0)
6813 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6814 device_xname(sc->sc_dev), phy, reg);
6815 else if (mdic & MDIC_E)
6816 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6817 device_xname(sc->sc_dev), phy, reg);
6818 }
6819
6820 /*
6821 * wm_gmii_i80003_readreg: [mii interface function]
6822 *
6823 * Read a PHY register on the kumeran.
6824 * This could be handled by the PHY layer if we didn't have to lock the
6825 * resource ...
6826 */ 6827 static int 6828 wm_gmii_i80003_readreg(device_t self, int phy, int reg) 6829 { 6830 struct wm_softc *sc = device_private(self); 6831 int sem; 6832 int rv; 6833 6834 if (phy != 1) /* only one PHY on kumeran bus */ 6835 return 0; 6836 6837 sem = swfwphysem[sc->sc_funcid]; 6838 if (wm_get_swfw_semaphore(sc, sem)) { 6839 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6840 __func__); 6841 return 0; 6842 } 6843 6844 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 6845 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 6846 reg >> GG82563_PAGE_SHIFT); 6847 } else { 6848 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 6849 reg >> GG82563_PAGE_SHIFT); 6850 } 6851 /* Wait more 200us for a bug of the ready bit in the MDIC register */ 6852 delay(200); 6853 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 6854 delay(200); 6855 6856 wm_put_swfw_semaphore(sc, sem); 6857 return rv; 6858 } 6859 6860 /* 6861 * wm_gmii_i80003_writereg: [mii interface function] 6862 * 6863 * Write a PHY register on the kumeran. 6864 * This could be handled by the PHY layer if we didn't have to lock the 6865 * ressource ... 6866 */ 6867 static void 6868 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val) 6869 { 6870 struct wm_softc *sc = device_private(self); 6871 int sem; 6872 6873 if (phy != 1) /* only one PHY on kumeran bus */ 6874 return; 6875 6876 sem = swfwphysem[sc->sc_funcid]; 6877 if (wm_get_swfw_semaphore(sc, sem)) { 6878 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6879 __func__); 6880 return; 6881 } 6882 6883 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 6884 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 6885 reg >> GG82563_PAGE_SHIFT); 6886 } else { 6887 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 6888 reg >> GG82563_PAGE_SHIFT); 6889 } 6890 /* Wait more 200us for a bug of the ready bit in the MDIC register */ 6891 delay(200); 6892 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 6893 delay(200); 6894 6895 wm_put_swfw_semaphore(sc, sem); 6896 } 6897 6898 /* 6899 * wm_gmii_bm_readreg: [mii interface function] 6900 * 6901 * Read a PHY register on the kumeran 6902 * This could be handled by the PHY layer if we didn't have to lock the 6903 * ressource ... 6904 */ 6905 static int 6906 wm_gmii_bm_readreg(device_t self, int phy, int reg) 6907 { 6908 struct wm_softc *sc = device_private(self); 6909 int sem; 6910 int rv; 6911 6912 sem = swfwphysem[sc->sc_funcid]; 6913 if (wm_get_swfw_semaphore(sc, sem)) { 6914 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6915 __func__); 6916 return 0; 6917 } 6918 6919 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 6920 if (phy == 1) 6921 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT, 6922 reg); 6923 else 6924 wm_gmii_i82544_writereg(self, phy, 6925 GG82563_PHY_PAGE_SELECT, 6926 reg >> GG82563_PAGE_SHIFT); 6927 } 6928 6929 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 6930 wm_put_swfw_semaphore(sc, sem); 6931 return rv; 6932 } 6933 6934 /* 6935 * wm_gmii_bm_writereg: [mii interface function] 6936 * 6937 * Write a PHY register on the kumeran. 6938 * This could be handled by the PHY layer if we didn't have to lock the 6939 * ressource ... 
6940 */ 6941 static void 6942 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val) 6943 { 6944 struct wm_softc *sc = device_private(self); 6945 int sem; 6946 6947 sem = swfwphysem[sc->sc_funcid]; 6948 if (wm_get_swfw_semaphore(sc, sem)) { 6949 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 6950 __func__); 6951 return; 6952 } 6953 6954 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 6955 if (phy == 1) 6956 wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT, 6957 reg); 6958 else 6959 wm_gmii_i82544_writereg(self, phy, 6960 GG82563_PHY_PAGE_SELECT, 6961 reg >> GG82563_PAGE_SHIFT); 6962 } 6963 6964 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 6965 wm_put_swfw_semaphore(sc, sem); 6966 } 6967 6968 static void 6969 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd) 6970 { 6971 struct wm_softc *sc = device_private(self); 6972 uint16_t regnum = BM_PHY_REG_NUM(offset); 6973 uint16_t wuce; 6974 6975 /* XXX Gig must be disabled for MDIO accesses to page 800 */ 6976 if (sc->sc_type == WM_T_PCH) { 6977 /* XXX e1000 driver do nothing... why? */ 6978 } 6979 6980 /* Set page 769 */ 6981 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 6982 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); 6983 6984 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG); 6985 6986 wuce &= ~BM_WUC_HOST_WU_BIT; 6987 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, 6988 wuce | BM_WUC_ENABLE_BIT); 6989 6990 /* Select page 800 */ 6991 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 6992 BM_WUC_PAGE << BME1000_PAGE_SHIFT); 6993 6994 /* Write page 800 */ 6995 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum); 6996 6997 if (rd) 6998 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE); 6999 else 7000 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val); 7001 7002 /* Set page 769 */ 7003 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT, 7004 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT); 7005 7006 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce); 7007 } 7008 7009 /* 7010 * wm_gmii_hv_readreg: [mii interface function] 7011 * 7012 * Read a PHY register on the kumeran 7013 * This could be handled by the PHY layer if we didn't have to lock the 7014 * ressource ... 
7015 */
7016 static int
7017 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7018 {
7019 struct wm_softc *sc = device_private(self);
7020 uint16_t page = BM_PHY_REG_PAGE(reg);
7021 uint16_t regnum = BM_PHY_REG_NUM(reg);
7022 uint16_t val;
7023 int rv;
7024
7025 if (wm_get_swfwhw_semaphore(sc)) {
7026 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7027 __func__);
7028 return 0;
7029 }
7030
7031 /* XXX Workaround failure in MDIO access while cable is disconnected */
7032 if (sc->sc_phytype == WMPHY_82577) {
7033 /* XXX must write */
7034 }
7035
7036 /* Page 800 works differently than the rest so it has its own func */
7037 if (page == BM_WUC_PAGE) {
7038 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
wm_put_swfwhw_semaphore(sc);
7039 return val;
7040 }
7041
7042 /*
7043 * Pages lower than 768 work differently than the rest, so they
7044 * have their own function
7045 */
7046 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7047 printf("gmii_hv_readreg!!!\n");
wm_put_swfwhw_semaphore(sc);
7048 return 0;
7049 }
7050
7051 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7052 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7053 page << BME1000_PAGE_SHIFT);
7054 }
7055
7056 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7057 wm_put_swfwhw_semaphore(sc);
7058 return rv;
7059 }
7060
7061 /*
7062 * wm_gmii_hv_writereg: [mii interface function]
7063 *
7064 * Write a PHY register on the HV PHY (PCH variants).
7065 * This could be handled by the PHY layer if we didn't have to lock the
7066 * resource ...
7067 */
7068 static void
7069 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7070 {
7071 struct wm_softc *sc = device_private(self);
7072 uint16_t page = BM_PHY_REG_PAGE(reg);
7073 uint16_t regnum = BM_PHY_REG_NUM(reg);
7074
7075 if (wm_get_swfwhw_semaphore(sc)) {
7076 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7077 __func__);
7078 return;
7079 }
7080
7081 /* XXX Workaround failure in MDIO access while cable is disconnected */
7082
7083 /* Page 800 works differently than the rest so it has its own func */
7084 if (page == BM_WUC_PAGE) {
7085 uint16_t tmp;
7086
7087 tmp = val;
7088 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
wm_put_swfwhw_semaphore(sc);
7089 return;
7090 }
7091
7092 /*
7093 * Pages lower than 768 work differently than the rest, so they
7094 * have their own function
7095 */
7096 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7097 printf("gmii_hv_writereg!!!\n");
wm_put_swfwhw_semaphore(sc);
7098 return;
7099 }
7100
7101 /*
7102 * XXX Workaround MDIO accesses being disabled after entering IEEE
7103 * Power Down (whenever bit 11 of the PHY control register is set)
7104 */
7105
7106 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7107 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7108 page << BME1000_PAGE_SHIFT);
7109 }
7110
7111 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7112 wm_put_swfwhw_semaphore(sc);
7113 }
7114
7115 /*
7116 * wm_sgmii_readreg: [mii interface function]
7117 *
7118 * Read a PHY register on the SGMII.
7119 * This could be handled by the PHY layer if we didn't have to lock the
7120 * resource ...
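*
* SGMII PHY registers are reached through the I2CCMD register rather
* than MDIC: the opcode, PHY address and register number are packed
* into one word, the ready bit is polled, and the 16-bit result
* comes back byte-swapped (hence the swap when the data is extracted
* below).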
7121 */
7122 static int
7123 wm_sgmii_readreg(device_t self, int phy, int reg)
7124 {
7125 struct wm_softc *sc = device_private(self);
7126 uint32_t i2ccmd;
7127 int i, rv;
7128
7129 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7130 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7131 __func__);
7132 return 0;
7133 }
7134
7135 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7136 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7137 | I2CCMD_OPCODE_READ;
7138 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7139
7140 /* Poll the ready bit */
7141 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7142 delay(50);
7143 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7144 if (i2ccmd & I2CCMD_READY)
7145 break;
7146 }
7147 if ((i2ccmd & I2CCMD_READY) == 0)
7148 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7149 if ((i2ccmd & I2CCMD_ERROR) != 0)
7150 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7151
7152 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7153
7154 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7155 return rv;
7156 }
7157
7158 /*
7159 * wm_sgmii_writereg: [mii interface function]
7160 *
7161 * Write a PHY register on the SGMII.
7162 * This could be handled by the PHY layer if we didn't have to lock the
7163 * resource ...
7164 */
7165 static void
7166 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7167 {
7168 struct wm_softc *sc = device_private(self);
7169 uint32_t i2ccmd;
7170 int i;
7171
7172 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7173 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7174 __func__);
7175 return;
7176 }
7177
7178 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7179 | (phy << I2CCMD_PHY_ADDR_SHIFT)
7180 | I2CCMD_OPCODE_WRITE;
7181 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7182
7183 /* Poll the ready bit */
7184 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7185 delay(50);
7186 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7187 if (i2ccmd & I2CCMD_READY)
7188 break;
7189 }
7190 if ((i2ccmd & I2CCMD_READY) == 0)
7191 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7192 if ((i2ccmd & I2CCMD_ERROR) != 0)
7193 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7194
7195 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7196 }
7197
7198 /*
7199 * wm_gmii_82580_readreg: [mii interface function]
7200 *
7201 * Read a PHY register on the 82580 and I350.
7202 * This could be handled by the PHY layer if we didn't have to lock the
7203 * resource ...
7204 */
7205 static int
7206 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7207 {
7208 struct wm_softc *sc = device_private(self);
7209 int sem;
7210 int rv;
7211
7212 sem = swfwphysem[sc->sc_funcid];
7213 if (wm_get_swfw_semaphore(sc, sem)) {
7214 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7215 __func__);
7216 return 0;
7217 }
7218
7219 rv = wm_gmii_i82544_readreg(self, phy, reg);
7220
7221 wm_put_swfw_semaphore(sc, sem);
7222 return rv;
7223 }
7224
7225 /*
7226 * wm_gmii_82580_writereg: [mii interface function]
7227 *
7228 * Write a PHY register on the 82580 and I350.
7229 * This could be handled by the PHY layer if we didn't have to lock the
7230 * resource ...
7231 */ 7232 static void 7233 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val) 7234 { 7235 struct wm_softc *sc = device_private(self); 7236 int sem; 7237 7238 sem = swfwphysem[sc->sc_funcid]; 7239 if (wm_get_swfw_semaphore(sc, sem)) { 7240 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7241 __func__); 7242 return; 7243 } 7244 7245 wm_gmii_i82544_writereg(self, phy, reg, val); 7246 7247 wm_put_swfw_semaphore(sc, sem); 7248 } 7249 7250 /* 7251 * wm_gmii_statchg: [mii interface function] 7252 * 7253 * Callback from MII layer when media changes. 7254 */ 7255 static void 7256 wm_gmii_statchg(struct ifnet *ifp) 7257 { 7258 struct wm_softc *sc = ifp->if_softc; 7259 struct mii_data *mii = &sc->sc_mii; 7260 7261 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 7262 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 7263 sc->sc_fcrtl &= ~FCRTL_XONE; 7264 7265 /* 7266 * Get flow control negotiation result. 7267 */ 7268 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 7269 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 7270 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 7271 mii->mii_media_active &= ~IFM_ETH_FMASK; 7272 } 7273 7274 if (sc->sc_flowflags & IFM_FLOW) { 7275 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { 7276 sc->sc_ctrl |= CTRL_TFCE; 7277 sc->sc_fcrtl |= FCRTL_XONE; 7278 } 7279 if (sc->sc_flowflags & IFM_ETH_RXPAUSE) 7280 sc->sc_ctrl |= CTRL_RFCE; 7281 } 7282 7283 if (sc->sc_mii.mii_media_active & IFM_FDX) { 7284 DPRINTF(WM_DEBUG_LINK, 7285 ("%s: LINK: statchg: FDX\n", ifp->if_xname)); 7286 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 7287 } else { 7288 DPRINTF(WM_DEBUG_LINK, 7289 ("%s: LINK: statchg: HDX\n", ifp->if_xname)); 7290 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 7291 } 7292 7293 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 7294 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 7295 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 
WMREG_OLD_FCRTL
7296 : WMREG_FCRTL, sc->sc_fcrtl);
7297 if (sc->sc_type == WM_T_80003) {
7298 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7299 case IFM_1000_T:
7300 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7301 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7302 sc->sc_tipg = TIPG_1000T_80003_DFLT;
7303 break;
7304 default:
7305 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7306 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7307 sc->sc_tipg = TIPG_10_100_80003_DFLT;
7308 break;
7309 }
7310 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7311 }
7312 }
7313
7314 /*
7315 * wm_kmrn_readreg:
7316 *
7317 * Read a kumeran register
7318 */
7319 static int
7320 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7321 {
7322 int rv;
7323
7324 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7325 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7326 aprint_error_dev(sc->sc_dev,
7327 "%s: failed to get semaphore\n", __func__);
7328 return 0;
7329 }
7330 } else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7331 if (wm_get_swfwhw_semaphore(sc)) {
7332 aprint_error_dev(sc->sc_dev,
7333 "%s: failed to get semaphore\n", __func__);
7334 return 0;
7335 }
7336 }
7337
7338 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7339 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7340 KUMCTRLSTA_REN);
7341 delay(2);
7342
7343 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7344
7345 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7346 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7347 else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7348 wm_put_swfwhw_semaphore(sc);
7349
7350 return rv;
7351 }
7352
7353 /*
7354 * wm_kmrn_writereg:
7355 *
7356 * Write a kumeran register
7357 */
7358 static void
7359 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7360 {
7361
7362 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7363 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7364 aprint_error_dev(sc->sc_dev,
7365 "%s: failed to get semaphore\n", __func__);
7366 return;
7367 }
7368 } else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7369 if (wm_get_swfwhw_semaphore(sc)) {
7370 aprint_error_dev(sc->sc_dev,
7371 "%s: failed to get semaphore\n", __func__);
7372 return;
7373 }
7374 }
7375
7376 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7377 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7378 (val & KUMCTRLSTA_MASK));
7379
7380 if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7381 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7382 else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7383 wm_put_swfwhw_semaphore(sc);
7384 }
7385
7386 static int
7387 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7388 {
7389 uint32_t eecd = 0;
7390
7391 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7392 || sc->sc_type == WM_T_82583) {
7393 eecd = CSR_READ(sc, WMREG_EECD);
7394
7395 /* Isolate bits 15 & 16 */
7396 eecd = ((eecd >> 15) & 0x03);
7397
7398 /* If both bits are set, device is Flash type */
7399 if (eecd == 0x03)
7400 return 0;
7401 }
7402 return 1;
7403 }
7404
7405 static int
7406 wm_get_swsm_semaphore(struct wm_softc *sc)
7407 {
7408 int32_t timeout;
7409 uint32_t swsm;
7410
7411 /* Get the FW semaphore. */
7412 timeout = 1000 + 1; /* XXX */
7413 while (timeout) {
7414 swsm = CSR_READ(sc, WMREG_SWSM);
7415 swsm |= SWSM_SWESMBI;
7416 CSR_WRITE(sc, WMREG_SWSM, swsm);
7417 /* If we managed to set the bit we got the semaphore.
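* The handshake is write-then-read-back: the hardware lets only
* one owner latch SWSM_SWESMBI at a time, so seeing our write
* survive the read back below means we hold the firmware semaphore.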
*/ 7418 swsm = CSR_READ(sc, WMREG_SWSM); 7419 if (swsm & SWSM_SWESMBI) 7420 break; 7421 7422 delay(50); 7423 timeout--; 7424 } 7425 7426 if (timeout == 0) { 7427 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n"); 7428 /* Release semaphores */ 7429 wm_put_swsm_semaphore(sc); 7430 return 1; 7431 } 7432 return 0; 7433 } 7434 7435 static void 7436 wm_put_swsm_semaphore(struct wm_softc *sc) 7437 { 7438 uint32_t swsm; 7439 7440 swsm = CSR_READ(sc, WMREG_SWSM); 7441 swsm &= ~(SWSM_SWESMBI); 7442 CSR_WRITE(sc, WMREG_SWSM, swsm); 7443 } 7444 7445 static int 7446 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 7447 { 7448 uint32_t swfw_sync; 7449 uint32_t swmask = mask << SWFW_SOFT_SHIFT; 7450 uint32_t fwmask = mask << SWFW_FIRM_SHIFT; 7451 int timeout = 200; 7452 7453 for (timeout = 0; timeout < 200; timeout++) { 7454 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 7455 if (wm_get_swsm_semaphore(sc)) { 7456 aprint_error_dev(sc->sc_dev, 7457 "%s: failed to get semaphore\n", 7458 __func__); 7459 return 1; 7460 } 7461 } 7462 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 7463 if ((swfw_sync & (swmask | fwmask)) == 0) { 7464 swfw_sync |= swmask; 7465 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 7466 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7467 wm_put_swsm_semaphore(sc); 7468 return 0; 7469 } 7470 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7471 wm_put_swsm_semaphore(sc); 7472 delay(5000); 7473 } 7474 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n", 7475 device_xname(sc->sc_dev), mask, swfw_sync); 7476 return 1; 7477 } 7478 7479 static void 7480 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 7481 { 7482 uint32_t swfw_sync; 7483 7484 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 7485 while (wm_get_swsm_semaphore(sc) != 0) 7486 continue; 7487 } 7488 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 7489 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); 7490 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 7491 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 7492 wm_put_swsm_semaphore(sc); 7493 } 7494 7495 static int 7496 wm_get_swfwhw_semaphore(struct wm_softc *sc) 7497 { 7498 uint32_t ext_ctrl; 7499 int timeout = 200; 7500 7501 for (timeout = 0; timeout < 200; timeout++) { 7502 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7503 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 7504 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 7505 7506 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7507 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 7508 return 0; 7509 delay(5000); 7510 } 7511 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n", 7512 device_xname(sc->sc_dev), ext_ctrl); 7513 return 1; 7514 } 7515 7516 static void 7517 wm_put_swfwhw_semaphore(struct wm_softc *sc) 7518 { 7519 uint32_t ext_ctrl; 7520 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 7521 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 7522 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 7523 } 7524 7525 static int 7526 wm_get_hw_semaphore_82573(struct wm_softc *sc) 7527 { 7528 int i = 0; 7529 uint32_t reg; 7530 7531 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7532 do { 7533 CSR_WRITE(sc, WMREG_EXTCNFCTR, 7534 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); 7535 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7536 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) 7537 break; 7538 delay(2*1000); 7539 i++; 7540 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); 7541 7542 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) { 7543 wm_put_hw_semaphore_82573(sc); 7544 log(LOG_ERR, "%s: Driver can't access the PHY\n", 7545 device_xname(sc->sc_dev)); 7546 return -1; 7547 } 7548 7549 return 0; 7550 } 7551 7552 static void 7553 
wm_put_hw_semaphore_82573(struct wm_softc *sc) 7554 { 7555 uint32_t reg; 7556 7557 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 7558 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 7559 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 7560 } 7561 7562 static int 7563 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) 7564 { 7565 uint32_t eecd; 7566 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; 7567 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); 7568 uint8_t sig_byte = 0; 7569 7570 switch (sc->sc_type) { 7571 case WM_T_ICH8: 7572 case WM_T_ICH9: 7573 eecd = CSR_READ(sc, WMREG_EECD); 7574 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) { 7575 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0; 7576 return 0; 7577 } 7578 /* FALLTHROUGH */ 7579 default: 7580 /* Default to 0 */ 7581 *bank = 0; 7582 7583 /* Check bank 0 */ 7584 wm_read_ich8_byte(sc, act_offset, &sig_byte); 7585 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 7586 *bank = 0; 7587 return 0; 7588 } 7589 7590 /* Check bank 1 */ 7591 wm_read_ich8_byte(sc, act_offset + bank1_offset, 7592 &sig_byte); 7593 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 7594 *bank = 1; 7595 return 0; 7596 } 7597 } 7598 7599 DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n", 7600 device_xname(sc->sc_dev))); 7601 return -1; 7602 } 7603 7604 /****************************************************************************** 7605 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 7606 * register. 7607 * 7608 * sc - Struct containing variables accessed by shared code 7609 * offset - offset of word in the EEPROM to read 7610 * data - word read from the EEPROM 7611 * words - number of words to read 7612 *****************************************************************************/ 7613 static int 7614 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) 7615 { 7616 int32_t error = 0; 7617 uint32_t flash_bank = 0; 7618 uint32_t act_offset = 0; 7619 uint32_t bank_offset = 0; 7620 uint16_t word = 0; 7621 uint16_t i = 0; 7622 7623 /* We need to know which is the valid flash bank. In the event 7624 * that we didn't allocate eeprom_shadow_ram, we may not be 7625 * managing flash_bank. So it cannot be trusted and needs 7626 * to be updated with each read. 7627 */ 7628 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank); 7629 if (error) { 7630 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n", 7631 __func__); 7632 flash_bank = 0; 7633 } 7634 7635 /* 7636 * Adjust offset appropriately if we're on bank 1 - adjust for word 7637 * size 7638 */ 7639 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 7640 7641 error = wm_get_swfwhw_semaphore(sc); 7642 if (error) { 7643 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 7644 __func__); 7645 return error; 7646 } 7647 7648 for (i = 0; i < words; i++) { 7649 /* The NVM part needs a byte offset, hence * 2 */ 7650 act_offset = bank_offset + ((offset + i) * 2); 7651 error = wm_read_ich8_word(sc, act_offset, &word); 7652 if (error) { 7653 aprint_error_dev(sc->sc_dev, 7654 "%s: failed to read NVM\n", __func__); 7655 break; 7656 } 7657 data[i] = word; 7658 } 7659 7660 wm_put_swfwhw_semaphore(sc); 7661 return error; 7662 } 7663 7664 /****************************************************************************** 7665 * This function does initial flash setup so that a new read/write/erase cycle 7666 * can be started. 
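*
* Roughly: verify the flash descriptor is valid (HSFSTS_FLDVAL),
* clear any stale error bits in HSFSTS by writing them back as ones,
* and make sure no SPI cycle is already in flight before setting the
* cycle-done bit.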
7667 *
7668 * sc - The pointer to the hw structure
7669 ****************************************************************************/
7670 static int32_t
7671 wm_ich8_cycle_init(struct wm_softc *sc)
7672 {
7673 uint16_t hsfsts;
7674 int32_t error = 1;
7675 int32_t i = 0;
7676
7677 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7678
7679 /* Maybe check the Flash Descriptor Valid bit in Hw status */
7680 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7681 return error;
7682 }
7683
7684 /* Clear FCERR in Hw status by writing 1 */
7685 /* Clear DAEL in Hw status by writing a 1 */
7686 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7687
7688 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7689
7690 /*
7691 * Either we should have a hardware SPI cycle-in-progress bit to check
7692 * against, in order to start a new cycle, or the FDONE bit should be
7693 * changed in the hardware so that it is 1 after hardware reset, which
7694 * can then be used as an indication whether a cycle is in progress or
7695 * has been completed.  We should also have some software semaphore
7696 * mechanism to guard FDONE or the cycle-in-progress bit so that two
7697 * threads' accesses to those bits can be serialized, or some way to
7698 * keep two threads from starting a cycle at the same time.
7699 */
7700
7701 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7702 /*
7703 * There is no cycle running at present, so we can start a
7704 * cycle
7705 */
7706
7707 /* Begin by setting Flash Cycle Done. */
7708 hsfsts |= HSFSTS_DONE;
7709 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7710 error = 0;
7711 } else {
7712 /*
7713 * Otherwise, poll for some time so the current cycle has a
7714 * chance to end before giving up.
7715 */
7716 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7717 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7718 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7719 error = 0;
7720 break;
7721 }
7722 delay(1);
7723 }
7724 if (error == 0) {
7725 /*
7726 * The previous cycle ended while we were waiting;
7727 * now set the Flash Cycle Done bit.
7728 */
7729 hsfsts |= HSFSTS_DONE;
7730 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7731 }
7732 }
7733 return error;
7734 }
7735
7736 /******************************************************************************
7737 * This function starts a flash cycle and waits for its completion
7738 *
7739 * sc - The pointer to the hw structure
7740 ****************************************************************************/
7741 static int32_t
7742 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7743 {
7744 uint16_t hsflctl;
7745 uint16_t hsfsts;
7746 int32_t error = 1;
7747 uint32_t i = 0;
7748
7749 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7750 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7751 hsflctl |= HSFCTL_GO;
7752 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7753
7754 /* wait till FDONE bit is set to 1 */
7755 do {
7756 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7757 if (hsfsts & HSFSTS_DONE)
7758 break;
7759 delay(1);
7760 i++;
7761 } while (i < timeout);
7762 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
7763 error = 0;
7764
7765 return error;
7766 }
7767
7768 /******************************************************************************
7769 * Reads a byte or word from the NVM using the ICH8 flash access registers.
7770 *
7771 * sc - The pointer to the hw structure
7772 * index - The index of the byte or word to read.
7773 * size - Size of data to read, 1=byte 2=word
7774 * data - Pointer to the word to store the value read.
7775 *****************************************************************************/
7776 static int32_t
7777 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7778 uint32_t size, uint16_t* data)
7779 {
7780 uint16_t hsfsts;
7781 uint16_t hsflctl;
7782 uint32_t flash_linear_address;
7783 uint32_t flash_data = 0;
7784 int32_t error = 1;
7785 int32_t count = 0;
7786
7787 if (size < 1 || size > 2 || data == 0x0 ||
7788 index > ICH_FLASH_LINEAR_ADDR_MASK)
7789 return error;
7790
7791 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7792 sc->sc_ich8_flash_base;
7793
7794 do {
7795 delay(1);
7796 /* Steps */
7797 error = wm_ich8_cycle_init(sc);
7798 if (error)
7799 break;
7800
7801 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7802 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7803 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7804 & HSFCTL_BCOUNT_MASK;
7805 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7806 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7807
7808 /*
7809 * Write the last 24 bits of index into Flash Linear address
7810 * field in Flash Address
7811 */
7812 /* TODO: TBD maybe check the index against the size of flash */
7813
7814 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7815
7816 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7817
7818 /*
7819 * Check if FCERR is set to 1. If set to 1, clear it and try
7820 * the whole sequence a few more times; otherwise read in
7821 * (shift in) the Flash Data0 register, least significant
7822 * byte first.
7823 */
7824 if (error == 0) {
7825 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7826 if (size == 1)
7827 *data = (uint8_t)(flash_data & 0x000000FF);
7828 else if (size == 2)
7829 *data = (uint16_t)(flash_data & 0x0000FFFF);
7830 break;
7831 } else {
7832 /*
7833 * If we've gotten here, then things are probably
7834 * completely hosed, but if the error condition is
7835 * detected, it won't hurt to give it another try...
7836 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7837 */
7838 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7839 if (hsfsts & HSFSTS_ERR) {
7840 /* Repeat for some time before giving up. */
7841 continue;
7842 } else if ((hsfsts & HSFSTS_DONE) == 0)
7843 break;
7844 }
7845 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7846
7847 return error;
7848 }
7849
7850 /******************************************************************************
7851 * Reads a single byte from the NVM using the ICH8 flash access registers.
7852 *
7853 * sc - pointer to wm_hw structure
7854 * index - The index of the byte to read.
7855 * data - Pointer to a byte to store the value read.
7856 *****************************************************************************/
7857 static int32_t
7858 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7859 {
7860 int32_t status;
7861 uint16_t word = 0;
7862
7863 status = wm_read_ich8_data(sc, index, 1, &word);
7864 if (status == 0)
7865 *data = (uint8_t)word;
7866 else
7867 *data = 0;
7868
7869 return status;
7870 }
7871
7872 /******************************************************************************
7873 * Reads a word from the NVM using the ICH8 flash access registers.
7874 *
7875 * sc - pointer to wm_hw structure
7876 * index - The starting byte index of the word to read.
7877 * data - Pointer to a word to store the value read.
/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}

static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);

	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
		return 1;

	return 0;
}

static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

static int
wm_enable_mng_pass_thru(struct wm_softc *sc)
{
	uint32_t manc, fwsm, factps;

	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
		return 0;

	manc = CSR_READ(sc, WMREG_MANC);

	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
	    device_xname(sc->sc_dev), manc));
	if ((manc & MANC_RECV_TCO_EN) == 0)
		return 0;

	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
		fwsm = CSR_READ(sc, WMREG_FWSM);
		factps = CSR_READ(sc, WMREG_FACTPS);
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((fwsm & FWSM_MODE_MASK)
			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
		uint16_t data;

		factps = CSR_READ(sc, WMREG_FACTPS);
		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
		    device_xname(sc->sc_dev), factps, data));
		if (((factps & FACTPS_MNGCG) == 0)
		    && ((data & EEPROM_CFG2_MNGM_MASK)
			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
			return 1;
	} else if (((manc & MANC_SMBUS_EN) != 0)
	    && ((manc & MANC_ASF_EN) == 0))
		return 1;

	return 0;
}
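/*
 * Aside: a minimal sketch of how the checks above combine at attach
 * time.  When management firmware is active it may own the PHY, so the
 * driver records that fact and avoids resetting the PHY while a
 * management cycle blocks it.  wm_mng_example() is a hypothetical
 * illustration only; the real logic is spread across the attach and
 * reset paths, and wm_check_reset_block()/wm_gmii_reset() are declared
 * near the top of this file.
 */
#ifdef notyet
static void
wm_mng_example(struct wm_softc *sc)
{

	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

	/* Don't touch the PHY while firmware blocks resets */
	if (wm_check_reset_block(sc) != 0)
		return;

	wm_gmii_reset(sc);
}
#endif /* notyet */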
static int
wm_check_reset_block(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
		break;
	default:
		/* no problem */
		break;
	}

	return 0;
}

static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static void
wm_release_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
		return;

	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
	} else {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
	}
}
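/*
 * Aside: a sketch of the intended pairing of the two functions above.
 * The driver asserts DRV_LOAD while it owns the hardware and drops it
 * again when it stops, so the management firmware knows when it may
 * take over.  wm_hw_control_example() is a hypothetical illustration
 * only; the real calls are made from the init/stop/detach paths.
 */
#ifdef notyet
static void
wm_hw_control_example(struct wm_softc *sc)
{

	wm_get_hw_control(sc);		/* driver now owns the hardware */

	/* ... normal operation ... */

	wm_release_hw_control(sc);	/* hand control back to firmware */
}
#endif /* notyet */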
/* XXX Currently TBI only */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
	    device_xname(sc->sc_dev), __func__,
	    ((ctrl & CTRL_SWDPIN(1)) == sig),
	    ((status & STATUS_LU) != 0),
	    ((rxcw & RXCW_C) != 0)));

	/*
	 * SWDPIN	LU	RXCW
	 *	0	0	0
	 *	0	0	1	(should not happen)
	 *	0	1	0	(should not happen)
	 *	0	1	1	(should not happen)
	 *	1	0	0	Disable autonegotiation and force link-up
	 *	1	0	1	got /C/ but not link-up yet
	 *	1	1	0	(link-up)
	 *	1	1	1	If IFM_AUTO, back to autonegotiation
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		DPRINTF(WM_DEBUG_LINK, ("%s: force link-up and full-duplex\n",
		    __func__));
		sc->sc_tbi_linkup = 0;
		/* Disable autonegotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should keep sc->sc_ctrl in sync.
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonegotiation\n",
		    __func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw,
		    ctrl, status));
	}

	return 0;
}
/* Work-around for 82566 Kumeran PCS lock loss */
static void
wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
{
	int miistatus, active, i;
	int reg;

	miistatus = sc->sc_mii.mii_media_status;

	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
		return;

	active = sc->sc_mii.mii_media_active;

	/* Nothing to do if the link speed is other than 1 Gbps */
	if (IFM_SUBTYPE(active) != IFM_1000_T)
		return;

	for (i = 0; i < 10; i++) {
		/* read twice */
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
			goto out;	/* GOOD! */

		/* Reset the PHY */
		wm_gmii_reset(sc);
		delay(5*1000);
	}

	/* Disable GigE link negotiation */
	reg = CSR_READ(sc, WMREG_PHY_CTRL);
	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

	/*
	 * Call the gig speed drop workaround on Gig disable before
	 * accessing any PHY registers.
	 */
	wm_gig_downshift_workaround_ich8lan(sc);

 out:
	return;
}

/* Work-around: without this, WOL from S5 stops working */
static void
wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
{
	uint16_t kmrn_reg;

	/* Only for igp3 */
	if (sc->sc_phytype == WMPHY_IGP_3) {
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

#ifdef WM_WOL
/* Power down workaround on D3 */
static void
wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	for (i = 0; i < 2; i++) {
		/* Disable link */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);

		/*
		 * Call the gig speed drop workaround on Gig disable before
		 * accessing any PHY registers.
		 */
		if (sc->sc_type == WM_T_ICH8)
			wm_gig_downshift_workaround_ich8lan(sc);

		/* Write VR power-down enable */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);

		/* Read it back and test */
		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
			break;

		/* Issue PHY reset and repeat at most one more time */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
	}
}
#endif /* WM_WOL */
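/*
 * Aside: both workarounds above use the same read/modify/write pattern
 * on Kumeran registers through wm_kmrn_readreg()/wm_kmrn_writereg().
 * A hypothetical helper making that pattern explicit, shown for
 * illustration only and not used by this driver:
 */
#ifdef notyet
static void
wm_kmrn_setbits_example(struct wm_softc *sc, int offset, uint16_t bits)
{
	uint16_t reg;

	reg = wm_kmrn_readreg(sc, offset);		/* fetch current value */
	wm_kmrn_writereg(sc, offset, reg | bits);	/* set requested bits */
}
#endif /* notyet */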
/*
 * Workaround for pch's PHYs
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{
	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */

	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link runs at 1 Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	delay(20);
}

static void
wm_smbustopci(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(10);
		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(50*1000);

		/*
		 * Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if (sc->sc_type == WM_T_PCH2)
			wm_gate_hw_phy_config_ich8lan(sc, 1);
	}
}

static void
wm_set_pcie_completion_timeout(struct wm_softc *sc)
{
	uint32_t gcr;
	pcireg_t ctrl2;

	gcr = CSR_READ(sc, WMREG_GCR);

	/* Only take action if the timeout value is defaulted to 0 */
	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
		goto out;

	if ((gcr & GCR_CAP_VER2) == 0) {
		gcr |= GCR_CMPL_TMOUT_10MS;
		goto out;
	}

	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2);
	ctrl2 |= WM_PCIE_DCSR2_16MS;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);

 out:
	/* Disable completion timeout resend */
	gcr &= ~GCR_CMPL_TMOUT_RESEND;

	CSR_WRITE(sc, WMREG_GCR, gcr);
}
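/*
 * Aside: wm_smbustopci() above is assumed to run before the first MDIO
 * access on PCH-class chips, because the PHY may still be routed over
 * SMBus after power-on; toggling LANPHYPC forces it back onto the
 * PCIe-based interface.  A hypothetical bring-up ordering, for
 * illustration only (the real sequence lives in the attach/reset
 * paths):
 */
#ifdef notyet
static void
wm_pch_phy_bringup_example(struct wm_softc *sc)
{

	wm_smbustopci(sc);	/* route the PHY off SMBus first */
	wm_gmii_reset(sc);	/* MDIO access is reliable only now */
}
#endif /* notyet */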
/* Special case: the 82575 needs manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code, as we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD driver
	 * for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}
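/*
 * Aside: the init script above is a fixed (register, offset, value)
 * sequence; a hypothetical table-driven equivalent is sketched below
 * for clarity only.  Just the first few entries are shown; the rest
 * would follow the same pattern.
 */
#ifdef notyet
static const struct wm_82575_script_ent {
	int reg;
	uint8_t off;
	uint8_t val;
} wm_82575_script[] = {
	{ WMREG_SCTL, 0x00, 0x0c },	/* SerDes configuration */
	{ WMREG_SCTL, 0x01, 0x78 },
	/* ... */
};

static void
wm_reset_init_script_82575_table(struct wm_softc *sc)
{
	u_int i;

	for (i = 0; i < __arraycount(wm_82575_script); i++)
		wm_82575_write_8bit_ctlr_reg(sc, wm_82575_script[i].reg,
		    wm_82575_script[i].off, wm_82575_script[i].val);
}
#endif /* notyet */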
static void
wm_init_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		/* Disable hardware interception of ARP */
		manc &= ~MANC_ARP_EN;

		/* Enable receiving management packets to the host */
		if (sc->sc_type >= WM_T_82571) {
			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_release_manageability(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_HAS_MANAGE) {
		uint32_t manc = CSR_READ(sc, WMREG_MANC);

		manc |= MANC_ARP_EN;
		if (sc->sc_type >= WM_T_82571)
			manc &= ~MANC_EN_MNG2HOST;

		CSR_WRITE(sc, WMREG_MANC, manc);
	}
}

static void
wm_get_wakeup(struct wm_softc *sc)
{

	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
	switch (sc->sc_type) {
	case WM_T_82573:
	case WM_T_82583:
		sc->sc_flags |= WM_F_HAS_AMT;
		/* FALLTHROUGH */
	case WM_T_80003:
	case WM_T_82541:
	case WM_T_82547:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_I350:
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		sc->sc_flags |= WM_F_HAS_AMT;
		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
		break;
	default:
		break;
	}

	/* 1: HAS_MANAGE */
	if (wm_enable_mng_pass_thru(sc) != 0)
		sc->sc_flags |= WM_F_HAS_MANAGE;

#ifdef WM_DEBUG
	printf("\n");
	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
		printf("HAS_AMT,");
	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
		printf("ARC_SUBSYS_VALID,");
	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
		printf("ASF_FIRMWARE_PRES,");
	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
		printf("HAS_MANAGE,");
	printf("\n");
#endif
	/*
	 * Note that the WOL flag is set only after the EEPROM reset code
	 * has run.
	 */
}

#ifdef WM_WOL
/* WOL in the newer chipset interfaces (pchlan) */
static void
wm_enable_phy_wakeup(struct wm_softc *sc)
{
#if 0
	uint16_t preg;

	/* Copy MAC RARs to PHY RARs */

	/* Copy MAC MTA to PHY MTA */

	/* Configure PHY Rx Control register */

	/* Enable PHY wakeup in MAC register */

	/* Configure and enable PHY wakeup in PHY registers */

	/* Activate PHY wakeup */

	/* XXX */
#endif
}

static void
wm_enable_wakeup(struct wm_softc *sc)
{
	uint32_t reg;
	pcireg_t pmode;
	int pmreg;

	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return;

	/* Advertise the wakeup capability */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
	    | CTRL_SWDPIN(3));
	CSR_WRITE(sc, WMREG_WUC, WUC_APME);

	/* ICH workaround */
	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
	case WM_T_PCH2:
	case WM_T_PCH_LPT:
		/* Disable gig during WOL */
		reg = CSR_READ(sc, WMREG_PHY_CTRL);
		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
		if (sc->sc_type == WM_T_PCH)
			wm_gmii_reset(sc);

		/* Power down workaround */
		if (sc->sc_phytype == WMPHY_82577) {
			struct mii_softc *child;

			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if (child->mii_mpd_rev <= 2)
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
		}
		break;
	default:
		break;
	}

	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_SWDPIN(3);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
	}

	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* for the multicast packet */
	reg |= WUFC_MC;
	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
#endif

	if (sc->sc_type == WM_T_PCH) {
		wm_enable_phy_wakeup(sc);
	} else {
		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
		CSR_WRITE(sc, WMREG_WUFC, reg);
	}

	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);

	/* Request PME */
	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
#if 0
	/* Disable WOL */
	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
#else
	/* For WOL */
	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
#endif
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
}
#endif /* WM_WOL */

static bool
wm_suspend(device_t self, const pmf_qual_t *qual)
{
	struct wm_softc *sc = device_private(self);

	wm_release_manageability(sc);
	wm_release_hw_control(sc);
#ifdef WM_WOL
	wm_enable_wakeup(sc);
#endif

	return true;
}
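/*
 * Aside: wm_suspend() above and wm_resume() below are pmf(9) power
 * hooks.  A sketch of how an attach path would register them is shown
 * here for illustration; wm_pmf_register_example() is hypothetical,
 * and the real registration happens at attach time.
 */
#ifdef notyet
static void
wm_pmf_register_example(device_t self, struct ifnet *ifp)
{

	if (pmf_device_register(self, wm_suspend, wm_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
}
#endif /* notyet */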
8678 { 8679 struct wm_softc *sc = device_private(self); 8680 8681 wm_init_manageability(sc); 8682 8683 return true; 8684 } 8685 8686 static void 8687 wm_set_eee_i350(struct wm_softc * sc) 8688 { 8689 uint32_t ipcnfg, eeer; 8690 8691 ipcnfg = CSR_READ(sc, WMREG_IPCNFG); 8692 eeer = CSR_READ(sc, WMREG_EEER); 8693 8694 if ((sc->sc_flags & WM_F_EEE) != 0) { 8695 ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); 8696 eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN 8697 | EEER_LPI_FC); 8698 } else { 8699 ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN); 8700 eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN 8701 | EEER_LPI_FC); 8702 } 8703 8704 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg); 8705 CSR_WRITE(sc, WMREG_EEER, eeer); 8706 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */ 8707 CSR_READ(sc, WMREG_EEER); /* XXX flush? */ 8708 } 8709