1 /* $NetBSD: if_wm.c,v 1.178 2009/07/30 03:46:48 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 
36 */ 37 38 /******************************************************************************* 39 40 Copyright (c) 2001-2005, Intel Corporation 41 All rights reserved. 42 43 Redistribution and use in source and binary forms, with or without 44 modification, are permitted provided that the following conditions are met: 45 46 1. Redistributions of source code must retain the above copyright notice, 47 this list of conditions and the following disclaimer. 48 49 2. Redistributions in binary form must reproduce the above copyright 50 notice, this list of conditions and the following disclaimer in the 51 documentation and/or other materials provided with the distribution. 52 53 3. Neither the name of the Intel Corporation nor the names of its 54 contributors may be used to endorse or promote products derived from 55 this software without specific prior written permission. 56 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 67 POSSIBILITY OF SUCH DAMAGE. 68 69 *******************************************************************************/ 70 /* 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 72 * 73 * TODO (in order of importance): 74 * 75 * - Rework how parameters are loaded from the EEPROM. 
76 */ 77 78 #include <sys/cdefs.h> 79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.178 2009/07/30 03:46:48 msaitoh Exp $"); 80 81 #include "bpfilter.h" 82 #include "rnd.h" 83 84 #include <sys/param.h> 85 #include <sys/systm.h> 86 #include <sys/callout.h> 87 #include <sys/mbuf.h> 88 #include <sys/malloc.h> 89 #include <sys/kernel.h> 90 #include <sys/socket.h> 91 #include <sys/ioctl.h> 92 #include <sys/errno.h> 93 #include <sys/device.h> 94 #include <sys/queue.h> 95 #include <sys/syslog.h> 96 97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 98 99 #if NRND > 0 100 #include <sys/rnd.h> 101 #endif 102 103 #include <net/if.h> 104 #include <net/if_dl.h> 105 #include <net/if_media.h> 106 #include <net/if_ether.h> 107 108 #if NBPFILTER > 0 109 #include <net/bpf.h> 110 #endif 111 112 #include <netinet/in.h> /* XXX for struct ip */ 113 #include <netinet/in_systm.h> /* XXX for struct ip */ 114 #include <netinet/ip.h> /* XXX for struct ip */ 115 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 116 #include <netinet/tcp.h> /* XXX for struct tcphdr */ 117 118 #include <sys/bus.h> 119 #include <sys/intr.h> 120 #include <machine/endian.h> 121 122 #include <dev/mii/mii.h> 123 #include <dev/mii/miivar.h> 124 #include <dev/mii/mii_bitbang.h> 125 #include <dev/mii/ikphyreg.h> 126 127 #include <dev/pci/pcireg.h> 128 #include <dev/pci/pcivar.h> 129 #include <dev/pci/pcidevs.h> 130 131 #include <dev/pci/if_wmreg.h> 132 133 #ifdef WM_DEBUG 134 #define WM_DEBUG_LINK 0x01 135 #define WM_DEBUG_TX 0x02 136 #define WM_DEBUG_RX 0x04 137 #define WM_DEBUG_GMII 0x08 138 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII; 139 140 #define DPRINTF(x, y) if (wm_debug & (x)) printf y 141 #else 142 #define DPRINTF(x, y) /* nothing */ 143 #endif /* WM_DEBUG */ 144 145 /* 146 * Transmit descriptor list size. Due to errata, we can only have 147 * 256 hardware descriptors in the ring on < 82544, but we use 4096 148 * on >= 82544. 
We tell the upper layers that they can queue a lot 149 * of packets, and we go ahead and manage up to 64 (16 for the i82547) 150 * of them at a time. 151 * 152 * We allow up to 256 (!) DMA segments per packet. Pathological packet 153 * chains containing many small mbufs have been observed in zero-copy 154 * situations with jumbo frames. 155 */ 156 #define WM_NTXSEGS 256 157 #define WM_IFQUEUELEN 256 158 #define WM_TXQUEUELEN_MAX 64 159 #define WM_TXQUEUELEN_MAX_82547 16 160 #define WM_TXQUEUELEN(sc) ((sc)->sc_txnum) 161 #define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1) 162 #define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8) 163 #define WM_NTXDESC_82542 256 164 #define WM_NTXDESC_82544 4096 165 #define WM_NTXDESC(sc) ((sc)->sc_ntxdesc) 166 #define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1) 167 #define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t)) 168 #define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc)) 169 #define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc)) 170 171 #define WM_MAXTXDMA round_page(IP_MAXPACKET) /* for TSO */ 172 173 /* 174 * Receive descriptor list size. We have one Rx buffer for normal 175 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized 176 * packet. We allocate 256 receive descriptors, each with a 2k 177 * buffer (MCLBYTES), which gives us room for 50 jumbo packets. 178 */ 179 #define WM_NRXDESC 256 180 #define WM_NRXDESC_MASK (WM_NRXDESC - 1) 181 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK) 182 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK) 183 184 /* 185 * Control structures are DMA'd to the i82542 chip. We allocate them in 186 * a single clump that maps to a single DMA segment to make several things 187 * easier. 188 */ 189 struct wm_control_data_82544 { 190 /* 191 * The receive descriptors. 192 */ 193 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC]; 194 195 /* 196 * The transmit descriptors. Put these at the end, because 197 * we might use a smaller number of them. 
198 */ 199 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544]; 200 }; 201 202 struct wm_control_data_82542 { 203 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC]; 204 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542]; 205 }; 206 207 #define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x) 208 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)]) 209 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)]) 210 211 /* 212 * Software state for transmit jobs. 213 */ 214 struct wm_txsoft { 215 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 216 bus_dmamap_t txs_dmamap; /* our DMA map */ 217 int txs_firstdesc; /* first descriptor in packet */ 218 int txs_lastdesc; /* last descriptor in packet */ 219 int txs_ndesc; /* # of descriptors used */ 220 }; 221 222 /* 223 * Software state for receive buffers. Each descriptor gets a 224 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill 225 * more than one buffer, we chain them together. 226 */ 227 struct wm_rxsoft { 228 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 229 bus_dmamap_t rxs_dmamap; /* our DMA map */ 230 }; 231 232 typedef enum { 233 WM_T_unknown = 0, 234 WM_T_82542_2_0, /* i82542 2.0 (really old) */ 235 WM_T_82542_2_1, /* i82542 2.1+ (old) */ 236 WM_T_82543, /* i82543 */ 237 WM_T_82544, /* i82544 */ 238 WM_T_82540, /* i82540 */ 239 WM_T_82545, /* i82545 */ 240 WM_T_82545_3, /* i82545 3.0+ */ 241 WM_T_82546, /* i82546 */ 242 WM_T_82546_3, /* i82546 3.0+ */ 243 WM_T_82541, /* i82541 */ 244 WM_T_82541_2, /* i82541 2.0+ */ 245 WM_T_82547, /* i82547 */ 246 WM_T_82547_2, /* i82547 2.0+ */ 247 WM_T_82571, /* i82571 */ 248 WM_T_82572, /* i82572 */ 249 WM_T_82573, /* i82573 */ 250 WM_T_82574, /* i82574 */ 251 WM_T_80003, /* i80003 */ 252 WM_T_ICH8, /* ICH8 LAN */ 253 WM_T_ICH9, /* ICH9 LAN */ 254 WM_T_ICH10, /* ICH10 LAN */ 255 } wm_chip_type; 256 257 #define WM_LINKUP_TIMEOUT 50 258 259 /* 260 * Software state per device. 
261 */ 262 struct wm_softc { 263 device_t sc_dev; /* generic device information */ 264 bus_space_tag_t sc_st; /* bus space tag */ 265 bus_space_handle_t sc_sh; /* bus space handle */ 266 bus_space_tag_t sc_iot; /* I/O space tag */ 267 bus_space_handle_t sc_ioh; /* I/O space handle */ 268 bus_space_tag_t sc_flasht; /* flash registers space tag */ 269 bus_space_handle_t sc_flashh; /* flash registers space handle */ 270 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 271 struct ethercom sc_ethercom; /* ethernet common data */ 272 pci_chipset_tag_t sc_pc; 273 pcitag_t sc_pcitag; 274 275 wm_chip_type sc_type; /* chip type */ 276 int sc_flags; /* flags; see below */ 277 int sc_bus_speed; /* PCI/PCIX bus speed */ 278 int sc_pcix_offset; /* PCIX capability register offset */ 279 int sc_flowflags; /* 802.3x flow control flags */ 280 281 void *sc_ih; /* interrupt cookie */ 282 283 int sc_ee_addrbits; /* EEPROM address bits */ 284 285 struct mii_data sc_mii; /* MII/media information */ 286 287 callout_t sc_tick_ch; /* tick callout */ 288 289 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 290 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 291 292 int sc_align_tweak; 293 294 /* 295 * Software state for the transmit and receive descriptors. 296 */ 297 int sc_txnum; /* must be a power of two */ 298 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX]; 299 struct wm_rxsoft sc_rxsoft[WM_NRXDESC]; 300 301 /* 302 * Control data structures. 303 */ 304 int sc_ntxdesc; /* must be a power of two */ 305 struct wm_control_data_82544 *sc_control_data; 306 #define sc_txdescs sc_control_data->wcd_txdescs 307 #define sc_rxdescs sc_control_data->wcd_rxdescs 308 309 #ifdef WM_EVENT_COUNTERS 310 /* Event counters. 
*/ 311 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */ 312 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */ 313 struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */ 314 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */ 315 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */ 316 struct evcnt sc_ev_rxintr; /* Rx interrupts */ 317 struct evcnt sc_ev_linkintr; /* Link interrupts */ 318 319 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */ 320 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */ 321 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */ 322 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */ 323 struct evcnt sc_ev_txtusum6; /* TCP/UDP v6 cksums comp. out-bound */ 324 struct evcnt sc_ev_txtso; /* TCP seg offload out-bound (IPv4) */ 325 struct evcnt sc_ev_txtso6; /* TCP seg offload out-bound (IPv6) */ 326 struct evcnt sc_ev_txtsopain; /* painful header manip. for TSO */ 327 328 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */ 329 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */ 330 331 struct evcnt sc_ev_tu; /* Tx underrun */ 332 333 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 334 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 335 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 336 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 337 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 338 #endif /* WM_EVENT_COUNTERS */ 339 340 bus_addr_t sc_tdt_reg; /* offset of TDT register */ 341 342 int sc_txfree; /* number of free Tx descriptors */ 343 int sc_txnext; /* next ready Tx descriptor */ 344 345 int sc_txsfree; /* number of free Tx jobs */ 346 int sc_txsnext; /* next free Tx job */ 347 int sc_txsdirty; /* dirty Tx jobs */ 348 349 /* These 5 variables are used only on the 82547. 
*/ 350 int sc_txfifo_size; /* Tx FIFO size */ 351 int sc_txfifo_head; /* current head of FIFO */ 352 uint32_t sc_txfifo_addr; /* internal address of start of FIFO */ 353 int sc_txfifo_stall; /* Tx FIFO is stalled */ 354 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 355 356 bus_addr_t sc_rdt_reg; /* offset of RDT register */ 357 358 int sc_rxptr; /* next ready Rx descriptor/queue ent */ 359 int sc_rxdiscard; 360 int sc_rxlen; 361 struct mbuf *sc_rxhead; 362 struct mbuf *sc_rxtail; 363 struct mbuf **sc_rxtailp; 364 365 uint32_t sc_ctrl; /* prototype CTRL register */ 366 #if 0 367 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 368 #endif 369 uint32_t sc_icr; /* prototype interrupt bits */ 370 uint32_t sc_itr; /* prototype intr throttling reg */ 371 uint32_t sc_tctl; /* prototype TCTL register */ 372 uint32_t sc_rctl; /* prototype RCTL register */ 373 uint32_t sc_txcw; /* prototype TXCW register */ 374 uint32_t sc_tipg; /* prototype TIPG register */ 375 uint32_t sc_fcrtl; /* prototype FCRTL register */ 376 uint32_t sc_pba; /* prototype PBA register */ 377 378 int sc_tbi_linkup; /* TBI link status */ 379 int sc_tbi_anegticks; /* autonegotiation ticks */ 380 int sc_tbi_ticks; /* tbi ticks */ 381 int sc_tbi_nrxcfg; /* count of ICR_RXCFG */ 382 int sc_tbi_lastnrxcfg; /* count of ICR_RXCFG (on last tick) */ 383 384 int sc_mchash_type; /* multicast filter offset */ 385 386 #if NRND > 0 387 rndsource_element_t rnd_source; /* random source */ 388 #endif 389 int sc_ich8_flash_base; 390 int sc_ich8_flash_bank_size; 391 }; 392 393 #define WM_RXCHAIN_RESET(sc) \ 394 do { \ 395 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \ 396 *(sc)->sc_rxtailp = NULL; \ 397 (sc)->sc_rxlen = 0; \ 398 } while (/*CONSTCOND*/0) 399 400 #define WM_RXCHAIN_LINK(sc, m) \ 401 do { \ 402 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \ 403 (sc)->sc_rxtailp = &(m)->m_next; \ 404 } while (/*CONSTCOND*/0) 405 406 /* sc_flags */ 407 #define WM_F_HAS_MII 0x0001 /* has MII */ 408 #define 
WM_F_EEPROM_HANDSHAKE 0x0002 /* requires EEPROM handshake */ 409 #define WM_F_EEPROM_SEMAPHORE 0x0004 /* EEPROM with semaphore */ 410 #define WM_F_EEPROM_EERDEEWR 0x0008 /* EEPROM access via EERD/EEWR */ 411 #define WM_F_EEPROM_SPI 0x0010 /* EEPROM is SPI */ 412 #define WM_F_EEPROM_FLASH 0x0020 /* EEPROM is FLASH */ 413 #define WM_F_EEPROM_INVALID 0x0040 /* EEPROM not present (bad checksum) */ 414 #define WM_F_IOH_VALID 0x0080 /* I/O handle is valid */ 415 #define WM_F_BUS64 0x0100 /* bus is 64-bit */ 416 #define WM_F_PCIX 0x0200 /* bus is PCI-X */ 417 #define WM_F_CSA 0x0400 /* bus is CSA */ 418 #define WM_F_PCIE 0x0800 /* bus is PCI-Express */ 419 #define WM_F_SWFW_SYNC 0x1000 /* Software-Firmware synchronisation */ 420 #define WM_F_SWFWHW_SYNC 0x2000 /* Software-Firmware synchronisation */ 421 422 #ifdef WM_EVENT_COUNTERS 423 #define WM_EVCNT_INCR(ev) (ev)->ev_count++ 424 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val) 425 #else 426 #define WM_EVCNT_INCR(ev) /* nothing */ 427 #define WM_EVCNT_ADD(ev, val) /* nothing */ 428 #endif 429 430 #define CSR_READ(sc, reg) \ 431 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 432 #define CSR_WRITE(sc, reg, val) \ 433 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 434 #define CSR_WRITE_FLUSH(sc) \ 435 (void) CSR_READ((sc), WMREG_STATUS) 436 437 #define ICH8_FLASH_READ32(sc, reg) \ 438 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg)) 439 #define ICH8_FLASH_WRITE32(sc, reg, data) \ 440 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) 441 442 #define ICH8_FLASH_READ16(sc, reg) \ 443 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg)) 444 #define ICH8_FLASH_WRITE16(sc, reg, data) \ 445 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data)) 446 447 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x))) 448 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x))) 449 450 #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU) 451 #define 
WM_CDTXADDR_HI(sc, x) \ 452 (sizeof(bus_addr_t) == 8 ? \ 453 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0) 454 455 #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU) 456 #define WM_CDRXADDR_HI(sc, x) \ 457 (sizeof(bus_addr_t) == 8 ? \ 458 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0) 459 460 #define WM_CDTXSYNC(sc, x, n, ops) \ 461 do { \ 462 int __x, __n; \ 463 \ 464 __x = (x); \ 465 __n = (n); \ 466 \ 467 /* If it will wrap around, sync to the end of the ring. */ \ 468 if ((__x + __n) > WM_NTXDESC(sc)) { \ 469 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 470 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \ 471 (WM_NTXDESC(sc) - __x), (ops)); \ 472 __n -= (WM_NTXDESC(sc) - __x); \ 473 __x = 0; \ 474 } \ 475 \ 476 /* Now sync whatever is left. */ \ 477 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 478 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \ 479 } while (/*CONSTCOND*/0) 480 481 #define WM_CDRXSYNC(sc, x, ops) \ 482 do { \ 483 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 484 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \ 485 } while (/*CONSTCOND*/0) 486 487 #define WM_INIT_RXDESC(sc, x) \ 488 do { \ 489 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 490 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \ 491 struct mbuf *__m = __rxs->rxs_mbuf; \ 492 \ 493 /* \ 494 * Note: We scoot the packet forward 2 bytes in the buffer \ 495 * so that the payload after the Ethernet header is aligned \ 496 * to a 4-byte boundary. \ 497 * \ 498 * XXX BRAINDAMAGE ALERT! \ 499 * The stupid chip uses the same size for every buffer, which \ 500 * is set in the Receive Control register. We are using the 2K \ 501 * size option, but what we REALLY want is (2K - 2)! For this \ 502 * reason, we can't "scoot" packets longer than the standard \ 503 * Ethernet MTU. On strict-alignment platforms, if the total \ 504 * size exceeds (2K - 2) we set align_tweak to 0 and let \ 505 * the upper layer copy the headers. 
\ 506 */ \ 507 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \ 508 \ 509 wm_set_dma_addr(&__rxd->wrx_addr, \ 510 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \ 511 __rxd->wrx_len = 0; \ 512 __rxd->wrx_cksum = 0; \ 513 __rxd->wrx_status = 0; \ 514 __rxd->wrx_errors = 0; \ 515 __rxd->wrx_special = 0; \ 516 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 517 \ 518 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \ 519 } while (/*CONSTCOND*/0) 520 521 static void wm_start(struct ifnet *); 522 static void wm_watchdog(struct ifnet *); 523 static int wm_ioctl(struct ifnet *, u_long, void *); 524 static int wm_init(struct ifnet *); 525 static void wm_stop(struct ifnet *, int); 526 527 static void wm_reset(struct wm_softc *); 528 static void wm_rxdrain(struct wm_softc *); 529 static int wm_add_rxbuf(struct wm_softc *, int); 530 static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *); 531 static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *); 532 static int wm_validate_eeprom_checksum(struct wm_softc *); 533 static void wm_tick(void *); 534 535 static void wm_set_filter(struct wm_softc *); 536 537 static int wm_intr(void *); 538 static void wm_txintr(struct wm_softc *); 539 static void wm_rxintr(struct wm_softc *); 540 static void wm_linkintr(struct wm_softc *, uint32_t); 541 542 static void wm_tbi_mediainit(struct wm_softc *); 543 static int wm_tbi_mediachange(struct ifnet *); 544 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 545 546 static void wm_tbi_set_linkled(struct wm_softc *); 547 static void wm_tbi_check_link(struct wm_softc *); 548 549 static void wm_gmii_reset(struct wm_softc *); 550 551 static int wm_gmii_i82543_readreg(device_t, int, int); 552 static void wm_gmii_i82543_writereg(device_t, int, int, int); 553 554 static int wm_gmii_i82544_readreg(device_t, int, int); 555 static void wm_gmii_i82544_writereg(device_t, int, int, int); 556 557 static int 
wm_gmii_i80003_readreg(device_t, int, int); 558 static void wm_gmii_i80003_writereg(device_t, int, int, int); 559 560 static int wm_gmii_bm_readreg(device_t, int, int); 561 static void wm_gmii_bm_writereg(device_t, int, int, int); 562 563 static void wm_gmii_statchg(device_t); 564 565 static void wm_gmii_mediainit(struct wm_softc *); 566 static int wm_gmii_mediachange(struct ifnet *); 567 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 568 569 static int wm_kmrn_readreg(struct wm_softc *, int); 570 static void wm_kmrn_writereg(struct wm_softc *, int, int); 571 572 static int wm_match(device_t, cfdata_t, void *); 573 static void wm_attach(device_t, device_t, void *); 574 static int wm_is_onboard_nvm_eeprom(struct wm_softc *); 575 static void wm_get_auto_rd_done(struct wm_softc *); 576 static int wm_get_swsm_semaphore(struct wm_softc *); 577 static void wm_put_swsm_semaphore(struct wm_softc *); 578 static int wm_poll_eerd_eewr_done(struct wm_softc *, int); 579 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t); 580 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t); 581 static int wm_get_swfwhw_semaphore(struct wm_softc *); 582 static void wm_put_swfwhw_semaphore(struct wm_softc *); 583 584 static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *); 585 static int32_t wm_ich8_cycle_init(struct wm_softc *); 586 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t); 587 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t, 588 uint32_t, uint16_t *); 589 static int32_t wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *); 590 static int32_t wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *); 591 static void wm_82547_txfifo_stall(void *); 592 static int wm_check_mng_mode(struct wm_softc *); 593 static int wm_check_mng_mode_ich8lan(struct wm_softc *); 594 #if 0 595 static int wm_check_mng_mode_82574(struct wm_softc *); 596 #endif 597 static int wm_check_mng_mode_generic(struct 
wm_softc *); 598 static void wm_get_hw_control(struct wm_softc *); 599 static int wm_check_for_link(struct wm_softc *); 600 601 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc), 602 wm_match, wm_attach, NULL, NULL); 603 604 605 /* 606 * Devices supported by this driver. 607 */ 608 static const struct wm_product { 609 pci_vendor_id_t wmp_vendor; 610 pci_product_id_t wmp_product; 611 const char *wmp_name; 612 wm_chip_type wmp_type; 613 int wmp_flags; 614 #define WMP_F_1000X 0x01 615 #define WMP_F_1000T 0x02 616 } wm_products[] = { 617 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 618 "Intel i82542 1000BASE-X Ethernet", 619 WM_T_82542_2_1, WMP_F_1000X }, 620 621 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 622 "Intel i82543GC 1000BASE-X Ethernet", 623 WM_T_82543, WMP_F_1000X }, 624 625 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 626 "Intel i82543GC 1000BASE-T Ethernet", 627 WM_T_82543, WMP_F_1000T }, 628 629 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 630 "Intel i82544EI 1000BASE-T Ethernet", 631 WM_T_82544, WMP_F_1000T }, 632 633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 634 "Intel i82544EI 1000BASE-X Ethernet", 635 WM_T_82544, WMP_F_1000X }, 636 637 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 638 "Intel i82544GC 1000BASE-T Ethernet", 639 WM_T_82544, WMP_F_1000T }, 640 641 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 642 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 643 WM_T_82544, WMP_F_1000T }, 644 645 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 646 "Intel i82540EM 1000BASE-T Ethernet", 647 WM_T_82540, WMP_F_1000T }, 648 649 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 650 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 651 WM_T_82540, WMP_F_1000T }, 652 653 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 654 "Intel i82540EP 1000BASE-T Ethernet", 655 WM_T_82540, WMP_F_1000T }, 656 657 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 658 "Intel i82540EP 1000BASE-T Ethernet", 659 WM_T_82540, WMP_F_1000T }, 660 
661 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 662 "Intel i82540EP 1000BASE-T Ethernet", 663 WM_T_82540, WMP_F_1000T }, 664 665 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 666 "Intel i82545EM 1000BASE-T Ethernet", 667 WM_T_82545, WMP_F_1000T }, 668 669 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 670 "Intel i82545GM 1000BASE-T Ethernet", 671 WM_T_82545_3, WMP_F_1000T }, 672 673 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 674 "Intel i82545GM 1000BASE-X Ethernet", 675 WM_T_82545_3, WMP_F_1000X }, 676 #if 0 677 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 678 "Intel i82545GM Gigabit Ethernet (SERDES)", 679 WM_T_82545_3, WMP_F_SERDES }, 680 #endif 681 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 682 "Intel i82546EB 1000BASE-T Ethernet", 683 WM_T_82546, WMP_F_1000T }, 684 685 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 686 "Intel i82546EB 1000BASE-T Ethernet", 687 WM_T_82546, WMP_F_1000T }, 688 689 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 690 "Intel i82545EM 1000BASE-X Ethernet", 691 WM_T_82545, WMP_F_1000X }, 692 693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 694 "Intel i82546EB 1000BASE-X Ethernet", 695 WM_T_82546, WMP_F_1000X }, 696 697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 698 "Intel i82546GB 1000BASE-T Ethernet", 699 WM_T_82546_3, WMP_F_1000T }, 700 701 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 702 "Intel i82546GB 1000BASE-X Ethernet", 703 WM_T_82546_3, WMP_F_1000X }, 704 #if 0 705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES, 706 "Intel i82546GB Gigabit Ethernet (SERDES)", 707 WM_T_82546_3, WMP_F_SERDES }, 708 #endif 709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 710 "i82546GB quad-port Gigabit Ethernet", 711 WM_T_82546_3, WMP_F_1000T }, 712 713 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 714 "i82546GB quad-port Gigabit Ethernet (KSP3)", 715 WM_T_82546_3, WMP_F_1000T }, 716 717 { PCI_VENDOR_INTEL, 
PCI_PRODUCT_INTEL_82546GB_PCIE, 718 "Intel PRO/1000MT (82546GB)", 719 WM_T_82546_3, WMP_F_1000T }, 720 721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 722 "Intel i82541EI 1000BASE-T Ethernet", 723 WM_T_82541, WMP_F_1000T }, 724 725 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 726 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 727 WM_T_82541, WMP_F_1000T }, 728 729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 730 "Intel i82541EI Mobile 1000BASE-T Ethernet", 731 WM_T_82541, WMP_F_1000T }, 732 733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 734 "Intel i82541ER 1000BASE-T Ethernet", 735 WM_T_82541_2, WMP_F_1000T }, 736 737 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 738 "Intel i82541GI 1000BASE-T Ethernet", 739 WM_T_82541_2, WMP_F_1000T }, 740 741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 742 "Intel i82541GI Mobile 1000BASE-T Ethernet", 743 WM_T_82541_2, WMP_F_1000T }, 744 745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 746 "Intel i82541PI 1000BASE-T Ethernet", 747 WM_T_82541_2, WMP_F_1000T }, 748 749 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 750 "Intel i82547EI 1000BASE-T Ethernet", 751 WM_T_82547, WMP_F_1000T }, 752 753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 754 "Intel i82547EI Mobile 1000BASE-T Ethernet", 755 WM_T_82547, WMP_F_1000T }, 756 757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 758 "Intel i82547GI 1000BASE-T Ethernet", 759 WM_T_82547_2, WMP_F_1000T }, 760 761 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 762 "Intel PRO/1000 PT (82571EB)", 763 WM_T_82571, WMP_F_1000T }, 764 765 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 766 "Intel PRO/1000 PF (82571EB)", 767 WM_T_82571, WMP_F_1000X }, 768 #if 0 769 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 770 "Intel PRO/1000 PB (82571EB)", 771 WM_T_82571, WMP_F_SERDES }, 772 #endif 773 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 774 "Intel PRO/1000 QT (82571EB)", 775 WM_T_82571, WMP_F_1000T }, 776 777 { PCI_VENDOR_INTEL, 
PCI_PRODUCT_INTEL_82572EI_COPPER, 778 "Intel i82572EI 1000baseT Ethernet", 779 WM_T_82572, WMP_F_1000T }, 780 781 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 782 "Intel� PRO/1000 PT Quad Port Server Adapter", 783 WM_T_82571, WMP_F_1000T, }, 784 785 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 786 "Intel i82572EI 1000baseX Ethernet", 787 WM_T_82572, WMP_F_1000X }, 788 #if 0 789 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 790 "Intel i82572EI Gigabit Ethernet (SERDES)", 791 WM_T_82572, WMP_F_SERDES }, 792 #endif 793 794 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 795 "Intel i82572EI 1000baseT Ethernet", 796 WM_T_82572, WMP_F_1000T }, 797 798 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 799 "Intel i82573E", 800 WM_T_82573, WMP_F_1000T }, 801 802 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 803 "Intel i82573E IAMT", 804 WM_T_82573, WMP_F_1000T }, 805 806 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 807 "Intel i82573L Gigabit Ethernet", 808 WM_T_82573, WMP_F_1000T }, 809 810 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, 811 "Intel i82574L", 812 WM_T_82574, WMP_F_1000T }, 813 814 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 815 "i80003 dual 1000baseT Ethernet", 816 WM_T_80003, WMP_F_1000T }, 817 818 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 819 "i80003 dual 1000baseX Ethernet", 820 WM_T_80003, WMP_F_1000T }, 821 #if 0 822 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, 823 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", 824 WM_T_80003, WMP_F_SERDES }, 825 #endif 826 827 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, 828 "Intel i80003 1000baseT Ethernet", 829 WM_T_80003, WMP_F_1000T }, 830 #if 0 831 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, 832 "Intel i80003 Gigabit Ethernet (SERDES)", 833 WM_T_80003, WMP_F_SERDES }, 834 #endif 835 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, 836 "Intel i82801H (M_AMT) LAN Controller", 837 WM_T_ICH8, WMP_F_1000T }, 838 { 
PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, 839 "Intel i82801H (AMT) LAN Controller", 840 WM_T_ICH8, WMP_F_1000T }, 841 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, 842 "Intel i82801H LAN Controller", 843 WM_T_ICH8, WMP_F_1000T }, 844 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, 845 "Intel i82801H (IFE) LAN Controller", 846 WM_T_ICH8, WMP_F_1000T }, 847 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, 848 "Intel i82801H (M) LAN Controller", 849 WM_T_ICH8, WMP_F_1000T }, 850 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, 851 "Intel i82801H IFE (GT) LAN Controller", 852 WM_T_ICH8, WMP_F_1000T }, 853 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, 854 "Intel i82801H IFE (G) LAN Controller", 855 WM_T_ICH8, WMP_F_1000T }, 856 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, 857 "82801I (AMT) LAN Controller", 858 WM_T_ICH9, WMP_F_1000T }, 859 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, 860 "82801I LAN Controller", 861 WM_T_ICH9, WMP_F_1000T }, 862 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, 863 "82801I (G) LAN Controller", 864 WM_T_ICH9, WMP_F_1000T }, 865 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, 866 "82801I (GT) LAN Controller", 867 WM_T_ICH9, WMP_F_1000T }, 868 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, 869 "82801I (C) LAN Controller", 870 WM_T_ICH9, WMP_F_1000T }, 871 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, 872 "82801I mobile LAN Controller", 873 WM_T_ICH9, WMP_F_1000T }, 874 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V, 875 "82801I mobile (V) LAN Controller", 876 WM_T_ICH9, WMP_F_1000T }, 877 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, 878 "82801I mobile (AMT) LAN Controller", 879 WM_T_ICH9, WMP_F_1000T }, 880 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82567LM_3, 881 "82567LM-3 LAN Controller", 882 WM_T_ICH10, WMP_F_1000T }, 883 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82567LF_3, 884 "82567LF-3 LAN Controller", 885 WM_T_ICH10, WMP_F_1000T }, 886 { PCI_VENDOR_INTEL, 
PCI_PRODUCT_INTEL_82801J_D_BM_LF, 887 "i82801J (LF) LAN Controller", 888 WM_T_ICH10, WMP_F_1000T }, 889 { 0, 0, 890 NULL, 891 0, 0 }, 892 }; 893 894 #ifdef WM_EVENT_COUNTERS 895 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")]; 896 #endif /* WM_EVENT_COUNTERS */ 897 898 #if 0 /* Not currently used */ 899 static inline uint32_t 900 wm_io_read(struct wm_softc *sc, int reg) 901 { 902 903 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 904 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); 905 } 906 #endif 907 908 static inline void 909 wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 910 { 911 912 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 913 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 914 } 915 916 static inline void 917 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 918 { 919 wa->wa_low = htole32(v & 0xffffffffU); 920 if (sizeof(bus_addr_t) == 8) 921 wa->wa_high = htole32((uint64_t) v >> 32); 922 else 923 wa->wa_high = 0; 924 } 925 926 static const struct wm_product * 927 wm_lookup(const struct pci_attach_args *pa) 928 { 929 const struct wm_product *wmp; 930 931 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 932 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 933 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 934 return (wmp); 935 } 936 return (NULL); 937 } 938 939 static int 940 wm_match(device_t parent, cfdata_t cf, void *aux) 941 { 942 struct pci_attach_args *pa = aux; 943 944 if (wm_lookup(pa) != NULL) 945 return (1); 946 947 return (0); 948 } 949 950 static void 951 wm_attach(device_t parent, device_t self, void *aux) 952 { 953 struct wm_softc *sc = device_private(self); 954 struct pci_attach_args *pa = aux; 955 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 956 pci_chipset_tag_t pc = pa->pa_pc; 957 pci_intr_handle_t ih; 958 size_t cdata_size; 959 const char *intrstr = NULL; 960 const char *eetype, *xname; 961 bus_space_tag_t memt; 962 bus_space_handle_t memh; 963 bus_dma_segment_t seg; 964 int 
memh_valid; 965 int i, rseg, error; 966 const struct wm_product *wmp; 967 prop_data_t ea; 968 prop_number_t pn; 969 uint8_t enaddr[ETHER_ADDR_LEN]; 970 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin; 971 pcireg_t preg, memtype; 972 uint32_t reg; 973 974 sc->sc_dev = self; 975 callout_init(&sc->sc_tick_ch, 0); 976 977 wmp = wm_lookup(pa); 978 if (wmp == NULL) { 979 printf("\n"); 980 panic("wm_attach: impossible"); 981 } 982 983 sc->sc_pc = pa->pa_pc; 984 sc->sc_pcitag = pa->pa_tag; 985 986 if (pci_dma64_available(pa)) 987 sc->sc_dmat = pa->pa_dmat64; 988 else 989 sc->sc_dmat = pa->pa_dmat; 990 991 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG)); 992 aprint_naive(": Ethernet controller\n"); 993 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg); 994 995 sc->sc_type = wmp->wmp_type; 996 if (sc->sc_type < WM_T_82543) { 997 if (preg < 2) { 998 aprint_error_dev(sc->sc_dev, 999 "i82542 must be at least rev. 2\n"); 1000 return; 1001 } 1002 if (preg < 3) 1003 sc->sc_type = WM_T_82542_2_0; 1004 } 1005 1006 /* 1007 * Map the device. All devices support memory-mapped acccess, 1008 * and it is really required for normal operation. 1009 */ 1010 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA); 1011 switch (memtype) { 1012 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1013 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1014 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA, 1015 memtype, 0, &memt, &memh, NULL, NULL) == 0); 1016 break; 1017 default: 1018 memh_valid = 0; 1019 } 1020 1021 if (memh_valid) { 1022 sc->sc_st = memt; 1023 sc->sc_sh = memh; 1024 } else { 1025 aprint_error_dev(sc->sc_dev, 1026 "unable to map device registers\n"); 1027 return; 1028 } 1029 1030 /* 1031 * In addition, i82544 and later support I/O mapped indirect 1032 * register access. It is not desirable (nor supported in 1033 * this driver) to use it for normal operation, though it is 1034 * required to work around bugs in some chip versions. 
1035 */ 1036 if (sc->sc_type >= WM_T_82544) { 1037 /* First we have to find the I/O BAR. */ 1038 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) { 1039 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) == 1040 PCI_MAPREG_TYPE_IO) 1041 break; 1042 } 1043 if (i == PCI_MAPREG_END) 1044 aprint_error_dev(sc->sc_dev, 1045 "WARNING: unable to find I/O BAR\n"); 1046 else { 1047 /* 1048 * The i8254x doesn't apparently respond when the 1049 * I/O BAR is 0, which looks somewhat like it's not 1050 * been configured. 1051 */ 1052 preg = pci_conf_read(pc, pa->pa_tag, i); 1053 if (PCI_MAPREG_MEM_ADDR(preg) == 0) { 1054 aprint_error_dev(sc->sc_dev, 1055 "WARNING: I/O BAR at zero.\n"); 1056 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, 1057 0, &sc->sc_iot, &sc->sc_ioh, 1058 NULL, NULL) == 0) { 1059 sc->sc_flags |= WM_F_IOH_VALID; 1060 } else { 1061 aprint_error_dev(sc->sc_dev, 1062 "WARNING: unable to map I/O space\n"); 1063 } 1064 } 1065 1066 } 1067 1068 /* Enable bus mastering. Disable MWI on the i82542 2.0. */ 1069 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1070 preg |= PCI_COMMAND_MASTER_ENABLE; 1071 if (sc->sc_type < WM_T_82542_2_1) 1072 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; 1073 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 1074 1075 /* power up chip */ 1076 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, 1077 NULL)) && error != EOPNOTSUPP) { 1078 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 1079 return; 1080 } 1081 1082 /* 1083 * Map and establish our interrupt. 
1084 */ 1085 if (pci_intr_map(pa, &ih)) { 1086 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n"); 1087 return; 1088 } 1089 intrstr = pci_intr_string(pc, ih); 1090 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc); 1091 if (sc->sc_ih == NULL) { 1092 aprint_error_dev(sc->sc_dev, "unable to establish interrupt"); 1093 if (intrstr != NULL) 1094 aprint_normal(" at %s", intrstr); 1095 aprint_normal("\n"); 1096 return; 1097 } 1098 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 1099 1100 /* 1101 * Determine a few things about the bus we're connected to. 1102 */ 1103 if (sc->sc_type < WM_T_82543) { 1104 /* We don't really know the bus characteristics here. */ 1105 sc->sc_bus_speed = 33; 1106 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { 1107 /* 1108 * CSA (Communication Streaming Architecture) is about as fast 1109 * a 32-bit 66MHz PCI Bus. 1110 */ 1111 sc->sc_flags |= WM_F_CSA; 1112 sc->sc_bus_speed = 66; 1113 aprint_verbose_dev(sc->sc_dev, 1114 "Communication Streaming Architecture\n"); 1115 if (sc->sc_type == WM_T_82547) { 1116 callout_init(&sc->sc_txfifo_ch, 0); 1117 callout_setfunc(&sc->sc_txfifo_ch, 1118 wm_82547_txfifo_stall, sc); 1119 aprint_verbose_dev(sc->sc_dev, 1120 "using 82547 Tx FIFO stall work-around\n"); 1121 } 1122 } else if (sc->sc_type >= WM_T_82571) { 1123 sc->sc_flags |= WM_F_PCIE; 1124 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 1125 && (sc->sc_type != WM_T_ICH10)) 1126 sc->sc_flags |= WM_F_EEPROM_SEMAPHORE; 1127 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n"); 1128 } else { 1129 reg = CSR_READ(sc, WMREG_STATUS); 1130 if (reg & STATUS_BUS64) 1131 sc->sc_flags |= WM_F_BUS64; 1132 if ((reg & STATUS_PCIX_MODE) != 0) { 1133 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 1134 1135 sc->sc_flags |= WM_F_PCIX; 1136 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 1137 PCI_CAP_PCIX, 1138 &sc->sc_pcix_offset, NULL) == 0) 1139 aprint_error_dev(sc->sc_dev, 1140 "unable to find PCIX 
capability\n"); 1141 else if (sc->sc_type != WM_T_82545_3 && 1142 sc->sc_type != WM_T_82546_3) { 1143 /* 1144 * Work around a problem caused by the BIOS 1145 * setting the max memory read byte count 1146 * incorrectly. 1147 */ 1148 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 1149 sc->sc_pcix_offset + PCI_PCIX_CMD); 1150 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 1151 sc->sc_pcix_offset + PCI_PCIX_STATUS); 1152 1153 bytecnt = 1154 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >> 1155 PCI_PCIX_CMD_BYTECNT_SHIFT; 1156 maxb = 1157 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >> 1158 PCI_PCIX_STATUS_MAXB_SHIFT; 1159 if (bytecnt > maxb) { 1160 aprint_verbose_dev(sc->sc_dev, 1161 "resetting PCI-X MMRBC: %d -> %d\n", 1162 512 << bytecnt, 512 << maxb); 1163 pcix_cmd = (pcix_cmd & 1164 ~PCI_PCIX_CMD_BYTECNT_MASK) | 1165 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT); 1166 pci_conf_write(pa->pa_pc, pa->pa_tag, 1167 sc->sc_pcix_offset + PCI_PCIX_CMD, 1168 pcix_cmd); 1169 } 1170 } 1171 } 1172 /* 1173 * The quad port adapter is special; it has a PCIX-PCIX 1174 * bridge on the board, and can run the secondary bus at 1175 * a higher speed. 1176 */ 1177 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 1178 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 1179 : 66; 1180 } else if (sc->sc_flags & WM_F_PCIX) { 1181 switch (reg & STATUS_PCIXSPD_MASK) { 1182 case STATUS_PCIXSPD_50_66: 1183 sc->sc_bus_speed = 66; 1184 break; 1185 case STATUS_PCIXSPD_66_100: 1186 sc->sc_bus_speed = 100; 1187 break; 1188 case STATUS_PCIXSPD_100_133: 1189 sc->sc_bus_speed = 133; 1190 break; 1191 default: 1192 aprint_error_dev(sc->sc_dev, 1193 "unknown PCIXSPD %d; assuming 66MHz\n", 1194 reg & STATUS_PCIXSPD_MASK); 1195 sc->sc_bus_speed = 66; 1196 } 1197 } else 1198 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 1199 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n", 1200 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 1201 (sc->sc_flags & WM_F_PCIX) ? 
"PCIX" : "PCI"); 1202 } 1203 1204 /* 1205 * Allocate the control data structures, and create and load the 1206 * DMA map for it. 1207 * 1208 * NOTE: All Tx descriptors must be in the same 4G segment of 1209 * memory. So must Rx descriptors. We simplify by allocating 1210 * both sets within the same 4G segment. 1211 */ 1212 WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ? 1213 WM_NTXDESC_82542 : WM_NTXDESC_82544; 1214 cdata_size = sc->sc_type < WM_T_82544 ? 1215 sizeof(struct wm_control_data_82542) : 1216 sizeof(struct wm_control_data_82544); 1217 if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE, 1218 (bus_size_t) 0x100000000ULL, 1219 &seg, 1, &rseg, 0)) != 0) { 1220 aprint_error_dev(sc->sc_dev, 1221 "unable to allocate control data, error = %d\n", 1222 error); 1223 goto fail_0; 1224 } 1225 1226 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size, 1227 (void **)&sc->sc_control_data, 1228 BUS_DMA_COHERENT)) != 0) { 1229 aprint_error_dev(sc->sc_dev, 1230 "unable to map control data, error = %d\n", error); 1231 goto fail_1; 1232 } 1233 1234 if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size, 1235 0, 0, &sc->sc_cddmamap)) != 0) { 1236 aprint_error_dev(sc->sc_dev, 1237 "unable to create control data DMA map, error = %d\n", 1238 error); 1239 goto fail_2; 1240 } 1241 1242 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 1243 sc->sc_control_data, cdata_size, NULL, 1244 0)) != 0) { 1245 aprint_error_dev(sc->sc_dev, 1246 "unable to load control data DMA map, error = %d\n", 1247 error); 1248 goto fail_3; 1249 } 1250 1251 1252 /* 1253 * Create the transmit buffer DMA maps. 1254 */ 1255 WM_TXQUEUELEN(sc) = 1256 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 
1257 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 1258 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 1259 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 1260 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 1261 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 1262 aprint_error_dev(sc->sc_dev, 1263 "unable to create Tx DMA map %d, error = %d\n", 1264 i, error); 1265 goto fail_4; 1266 } 1267 } 1268 1269 /* 1270 * Create the receive buffer DMA maps. 1271 */ 1272 for (i = 0; i < WM_NRXDESC; i++) { 1273 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1274 MCLBYTES, 0, 0, 1275 &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 1276 aprint_error_dev(sc->sc_dev, 1277 "unable to create Rx DMA map %d error = %d\n", 1278 i, error); 1279 goto fail_5; 1280 } 1281 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1282 } 1283 1284 /* clear interesting stat counters */ 1285 CSR_READ(sc, WMREG_COLC); 1286 CSR_READ(sc, WMREG_RXERRC); 1287 1288 /* 1289 * Reset the chip to a known state. 1290 */ 1291 wm_reset(sc); 1292 1293 switch (sc->sc_type) { 1294 case WM_T_82571: 1295 case WM_T_82572: 1296 case WM_T_82573: 1297 case WM_T_82574: 1298 case WM_T_80003: 1299 case WM_T_ICH8: 1300 case WM_T_ICH9: 1301 case WM_T_ICH10: 1302 if (wm_check_mng_mode(sc) != 0) 1303 wm_get_hw_control(sc); 1304 break; 1305 default: 1306 break; 1307 } 1308 1309 /* 1310 * Get some information about the EEPROM. 
1311 */ 1312 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 1313 || (sc->sc_type == WM_T_ICH10)) { 1314 uint32_t flash_size; 1315 sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH; 1316 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH); 1317 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 1318 &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) { 1319 aprint_error_dev(sc->sc_dev, 1320 "can't map FLASH registers\n"); 1321 return; 1322 } 1323 flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 1324 sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) * 1325 ICH_FLASH_SECTOR_SIZE; 1326 sc->sc_ich8_flash_bank_size = 1327 ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1; 1328 sc->sc_ich8_flash_bank_size -= 1329 (flash_size & ICH_GFPREG_BASE_MASK); 1330 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 1331 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 1332 } else if (sc->sc_type == WM_T_80003) 1333 sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC; 1334 else if (sc->sc_type == WM_T_82573) 1335 sc->sc_flags |= WM_F_EEPROM_EERDEEWR; 1336 else if (sc->sc_type == WM_T_82574) 1337 sc->sc_flags |= WM_F_EEPROM_EERDEEWR; 1338 else if (sc->sc_type > WM_T_82544) 1339 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE; 1340 1341 if (sc->sc_type <= WM_T_82544) 1342 sc->sc_ee_addrbits = 6; 1343 else if (sc->sc_type <= WM_T_82546_3) { 1344 reg = CSR_READ(sc, WMREG_EECD); 1345 if (reg & EECD_EE_SIZE) 1346 sc->sc_ee_addrbits = 8; 1347 else 1348 sc->sc_ee_addrbits = 6; 1349 } else if (sc->sc_type <= WM_T_82547_2) { 1350 reg = CSR_READ(sc, WMREG_EECD); 1351 if (reg & EECD_EE_TYPE) { 1352 sc->sc_flags |= WM_F_EEPROM_SPI; 1353 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8; 1354 } else 1355 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 
8 : 6; 1356 } else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) && 1357 (wm_is_onboard_nvm_eeprom(sc) == 0)) { 1358 sc->sc_flags |= WM_F_EEPROM_FLASH; 1359 } else { 1360 /* Assume everything else is SPI. */ 1361 reg = CSR_READ(sc, WMREG_EECD); 1362 sc->sc_flags |= WM_F_EEPROM_SPI; 1363 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8; 1364 } 1365 1366 /* 1367 * Defer printing the EEPROM type until after verifying the checksum 1368 * This allows the EEPROM type to be printed correctly in the case 1369 * that no EEPROM is attached. 1370 */ 1371 1372 /* 1373 * Validate the EEPROM checksum. If the checksum fails, flag this for 1374 * later, so we can fail future reads from the EEPROM. 1375 */ 1376 if (wm_validate_eeprom_checksum(sc)) { 1377 /* 1378 * Read twice again because some PCI-e parts fail the first 1379 * check due to the link being in sleep state. 1380 */ 1381 if (wm_validate_eeprom_checksum(sc)) 1382 sc->sc_flags |= WM_F_EEPROM_INVALID; 1383 } 1384 1385 if (sc->sc_flags & WM_F_EEPROM_INVALID) 1386 aprint_verbose_dev(sc->sc_dev, "No EEPROM\n"); 1387 else if (sc->sc_flags & WM_F_EEPROM_FLASH) { 1388 aprint_verbose_dev(sc->sc_dev, "FLASH\n"); 1389 } else { 1390 if (sc->sc_flags & WM_F_EEPROM_SPI) 1391 eetype = "SPI"; 1392 else 1393 eetype = "MicroWire"; 1394 aprint_verbose_dev(sc->sc_dev, 1395 "%u word (%d address bits) %s EEPROM\n", 1396 1U << sc->sc_ee_addrbits, 1397 sc->sc_ee_addrbits, eetype); 1398 } 1399 1400 /* 1401 * Read the Ethernet address from the EEPROM, if not first found 1402 * in device properties. 
1403 */ 1404 ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr"); 1405 if (ea != NULL) { 1406 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 1407 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 1408 memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 1409 } else { 1410 if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR, 1411 sizeof(myea) / sizeof(myea[0]), myea)) { 1412 aprint_error_dev(sc->sc_dev, 1413 "unable to read Ethernet address\n"); 1414 return; 1415 } 1416 enaddr[0] = myea[0] & 0xff; 1417 enaddr[1] = myea[0] >> 8; 1418 enaddr[2] = myea[1] & 0xff; 1419 enaddr[3] = myea[1] >> 8; 1420 enaddr[4] = myea[2] & 0xff; 1421 enaddr[5] = myea[2] >> 8; 1422 } 1423 1424 /* 1425 * Toggle the LSB of the MAC address on the second port 1426 * of the dual port controller. 1427 */ 1428 if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3 1429 || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) { 1430 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1) 1431 enaddr[5] ^= 1; 1432 } 1433 1434 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 1435 ether_sprintf(enaddr)); 1436 1437 /* 1438 * Read the config info from the EEPROM, and set up various 1439 * bits in the control registers based on their contents. 
1440 */ 1441 pn = prop_dictionary_get(device_properties(sc->sc_dev), 1442 "i82543-cfg1"); 1443 if (pn != NULL) { 1444 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1445 cfg1 = (uint16_t) prop_number_integer_value(pn); 1446 } else { 1447 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) { 1448 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 1449 return; 1450 } 1451 } 1452 1453 pn = prop_dictionary_get(device_properties(sc->sc_dev), 1454 "i82543-cfg2"); 1455 if (pn != NULL) { 1456 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1457 cfg2 = (uint16_t) prop_number_integer_value(pn); 1458 } else { 1459 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) { 1460 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 1461 return; 1462 } 1463 } 1464 1465 if (sc->sc_type >= WM_T_82544) { 1466 pn = prop_dictionary_get(device_properties(sc->sc_dev), 1467 "i82543-swdpin"); 1468 if (pn != NULL) { 1469 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 1470 swdpin = (uint16_t) prop_number_integer_value(pn); 1471 } else { 1472 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) { 1473 aprint_error_dev(sc->sc_dev, 1474 "unable to read SWDPIN\n"); 1475 return; 1476 } 1477 } 1478 } 1479 1480 if (cfg1 & EEPROM_CFG1_ILOS) 1481 sc->sc_ctrl |= CTRL_ILOS; 1482 if (sc->sc_type >= WM_T_82544) { 1483 sc->sc_ctrl |= 1484 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) << 1485 CTRL_SWDPIO_SHIFT; 1486 sc->sc_ctrl |= 1487 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) << 1488 CTRL_SWDPINS_SHIFT; 1489 } else { 1490 sc->sc_ctrl |= 1491 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) << 1492 CTRL_SWDPIO_SHIFT; 1493 } 1494 1495 #if 0 1496 if (sc->sc_type >= WM_T_82544) { 1497 if (cfg1 & EEPROM_CFG1_IPS0) 1498 sc->sc_ctrl_ext |= CTRL_EXT_IPS; 1499 if (cfg1 & EEPROM_CFG1_IPS1) 1500 sc->sc_ctrl_ext |= CTRL_EXT_IPS1; 1501 sc->sc_ctrl_ext |= 1502 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << 1503 CTRL_EXT_SWDPIO_SHIFT; 1504 sc->sc_ctrl_ext |= 1505 ((swdpin >> 
(EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << 1506 CTRL_EXT_SWDPINS_SHIFT; 1507 } else { 1508 sc->sc_ctrl_ext |= 1509 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) << 1510 CTRL_EXT_SWDPIO_SHIFT; 1511 } 1512 #endif 1513 1514 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 1515 #if 0 1516 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 1517 #endif 1518 1519 /* 1520 * Set up some register offsets that are different between 1521 * the i82542 and the i82543 and later chips. 1522 */ 1523 if (sc->sc_type < WM_T_82543) { 1524 sc->sc_rdt_reg = WMREG_OLD_RDT0; 1525 sc->sc_tdt_reg = WMREG_OLD_TDT; 1526 } else { 1527 sc->sc_rdt_reg = WMREG_RDT; 1528 sc->sc_tdt_reg = WMREG_TDT; 1529 } 1530 1531 /* 1532 * Determine if we're TBI or GMII mode, and initialize the 1533 * media structures accordingly. 1534 */ 1535 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 1536 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573 1537 || sc->sc_type == WM_T_82574) { 1538 /* STATUS_TBIMODE reserved/reused, can't rely on it */ 1539 wm_gmii_mediainit(sc); 1540 } else if (sc->sc_type < WM_T_82543 || 1541 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 1542 if (wmp->wmp_flags & WMP_F_1000T) 1543 aprint_error_dev(sc->sc_dev, 1544 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 1545 wm_tbi_mediainit(sc); 1546 } else { 1547 if (wmp->wmp_flags & WMP_F_1000X) 1548 aprint_error_dev(sc->sc_dev, 1549 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 1550 wm_gmii_mediainit(sc); 1551 } 1552 1553 ifp = &sc->sc_ethercom.ec_if; 1554 xname = device_xname(sc->sc_dev); 1555 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 1556 ifp->if_softc = sc; 1557 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1558 ifp->if_ioctl = wm_ioctl; 1559 ifp->if_start = wm_start; 1560 ifp->if_watchdog = wm_watchdog; 1561 ifp->if_init = wm_init; 1562 ifp->if_stop = wm_stop; 1563 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN)); 1564 IFQ_SET_READY(&ifp->if_snd); 1565 1566 if (sc->sc_type != WM_T_82573 && 
sc->sc_type != WM_T_82574 && 1567 sc->sc_type != WM_T_ICH8) 1568 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 1569 1570 /* 1571 * If we're a i82543 or greater, we can support VLANs. 1572 */ 1573 if (sc->sc_type >= WM_T_82543) 1574 sc->sc_ethercom.ec_capabilities |= 1575 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 1576 1577 /* 1578 * We can perform TCPv4 and UDPv4 checkums in-bound. Only 1579 * on i82543 and later. 1580 */ 1581 if (sc->sc_type >= WM_T_82543) { 1582 ifp->if_capabilities |= 1583 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 1584 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 1585 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 1586 IFCAP_CSUM_TCPv6_Tx | 1587 IFCAP_CSUM_UDPv6_Tx; 1588 } 1589 1590 /* 1591 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL. 1592 * 1593 * 82541GI (8086:1076) ... no 1594 * 82572EI (8086:10b9) ... yes 1595 */ 1596 if (sc->sc_type >= WM_T_82571) { 1597 ifp->if_capabilities |= 1598 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 1599 } 1600 1601 /* 1602 * If we're a i82544 or greater (except i82547), we can do 1603 * TCP segmentation offload. 1604 */ 1605 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) { 1606 ifp->if_capabilities |= IFCAP_TSOv4; 1607 } 1608 1609 if (sc->sc_type >= WM_T_82571) { 1610 ifp->if_capabilities |= IFCAP_TSOv6; 1611 } 1612 1613 /* 1614 * Attach the interface. 1615 */ 1616 if_attach(ifp); 1617 ether_ifattach(ifp, enaddr); 1618 #if NRND > 0 1619 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0); 1620 #endif 1621 1622 #ifdef WM_EVENT_COUNTERS 1623 /* Attach event counters. 
*/ 1624 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC, 1625 NULL, xname, "txsstall"); 1626 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC, 1627 NULL, xname, "txdstall"); 1628 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC, 1629 NULL, xname, "txfifo_stall"); 1630 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR, 1631 NULL, xname, "txdw"); 1632 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR, 1633 NULL, xname, "txqe"); 1634 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, 1635 NULL, xname, "rxintr"); 1636 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, 1637 NULL, xname, "linkintr"); 1638 1639 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC, 1640 NULL, xname, "rxipsum"); 1641 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC, 1642 NULL, xname, "rxtusum"); 1643 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC, 1644 NULL, xname, "txipsum"); 1645 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC, 1646 NULL, xname, "txtusum"); 1647 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC, 1648 NULL, xname, "txtusum6"); 1649 1650 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC, 1651 NULL, xname, "txtso"); 1652 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC, 1653 NULL, xname, "txtso6"); 1654 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC, 1655 NULL, xname, "txtsopain"); 1656 1657 for (i = 0; i < WM_NTXSEGS; i++) { 1658 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i); 1659 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC, 1660 NULL, xname, wm_txseg_evcnt_names[i]); 1661 } 1662 1663 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC, 1664 NULL, xname, "txdrop"); 1665 1666 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC, 1667 NULL, xname, "tu"); 1668 1669 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, 1670 NULL, xname, "tx_xoff"); 1671 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 1672 NULL, xname, "tx_xon"); 
1673 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 1674 NULL, xname, "rx_xoff"); 1675 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 1676 NULL, xname, "rx_xon"); 1677 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 1678 NULL, xname, "rx_macctl"); 1679 #endif /* WM_EVENT_COUNTERS */ 1680 1681 if (!pmf_device_register(self, NULL, NULL)) 1682 aprint_error_dev(self, "couldn't establish power handler\n"); 1683 else 1684 pmf_class_network_register(self, ifp); 1685 1686 return; 1687 1688 /* 1689 * Free any resources we've allocated during the failed attach 1690 * attempt. Do this in reverse order and fall through. 1691 */ 1692 fail_5: 1693 for (i = 0; i < WM_NRXDESC; i++) { 1694 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 1695 bus_dmamap_destroy(sc->sc_dmat, 1696 sc->sc_rxsoft[i].rxs_dmamap); 1697 } 1698 fail_4: 1699 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 1700 if (sc->sc_txsoft[i].txs_dmamap != NULL) 1701 bus_dmamap_destroy(sc->sc_dmat, 1702 sc->sc_txsoft[i].txs_dmamap); 1703 } 1704 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 1705 fail_3: 1706 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 1707 fail_2: 1708 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data, 1709 cdata_size); 1710 fail_1: 1711 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 1712 fail_0: 1713 return; 1714 } 1715 1716 /* 1717 * wm_tx_offload: 1718 * 1719 * Set up TCP/IP checksumming parameters for the 1720 * specified packet. 1721 */ 1722 static int 1723 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp, 1724 uint8_t *fieldsp) 1725 { 1726 struct mbuf *m0 = txs->txs_mbuf; 1727 struct livengood_tcpip_ctxdesc *t; 1728 uint32_t ipcs, tucs, cmd, cmdlen, seg; 1729 uint32_t ipcse; 1730 struct ether_header *eh; 1731 int offset, iphl; 1732 uint8_t fields; 1733 1734 /* 1735 * XXX It would be nice if the mbuf pkthdr had offset 1736 * fields for the protocol headers. 
1737 */ 1738 1739 eh = mtod(m0, struct ether_header *); 1740 switch (htons(eh->ether_type)) { 1741 case ETHERTYPE_IP: 1742 case ETHERTYPE_IPV6: 1743 offset = ETHER_HDR_LEN; 1744 break; 1745 1746 case ETHERTYPE_VLAN: 1747 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1748 break; 1749 1750 default: 1751 /* 1752 * Don't support this protocol or encapsulation. 1753 */ 1754 *fieldsp = 0; 1755 *cmdp = 0; 1756 return (0); 1757 } 1758 1759 if ((m0->m_pkthdr.csum_flags & 1760 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) { 1761 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 1762 } else { 1763 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data); 1764 } 1765 ipcse = offset + iphl - 1; 1766 1767 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 1768 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 1769 seg = 0; 1770 fields = 0; 1771 1772 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 1773 int hlen = offset + iphl; 1774 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 1775 1776 if (__predict_false(m0->m_len < 1777 (hlen + sizeof(struct tcphdr)))) { 1778 /* 1779 * TCP/IP headers are not in the first mbuf; we need 1780 * to do this the slow and painful way. Let's just 1781 * hope this doesn't happen very often. 
1782 */ 1783 struct tcphdr th; 1784 1785 WM_EVCNT_INCR(&sc->sc_ev_txtsopain); 1786 1787 m_copydata(m0, hlen, sizeof(th), &th); 1788 if (v4) { 1789 struct ip ip; 1790 1791 m_copydata(m0, offset, sizeof(ip), &ip); 1792 ip.ip_len = 0; 1793 m_copyback(m0, 1794 offset + offsetof(struct ip, ip_len), 1795 sizeof(ip.ip_len), &ip.ip_len); 1796 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 1797 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 1798 } else { 1799 struct ip6_hdr ip6; 1800 1801 m_copydata(m0, offset, sizeof(ip6), &ip6); 1802 ip6.ip6_plen = 0; 1803 m_copyback(m0, 1804 offset + offsetof(struct ip6_hdr, ip6_plen), 1805 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 1806 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 1807 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 1808 } 1809 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 1810 sizeof(th.th_sum), &th.th_sum); 1811 1812 hlen += th.th_off << 2; 1813 } else { 1814 /* 1815 * TCP/IP headers are in the first mbuf; we can do 1816 * this the easy way. 1817 */ 1818 struct tcphdr *th; 1819 1820 if (v4) { 1821 struct ip *ip = 1822 (void *)(mtod(m0, char *) + offset); 1823 th = (void *)(mtod(m0, char *) + hlen); 1824 1825 ip->ip_len = 0; 1826 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1827 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1828 } else { 1829 struct ip6_hdr *ip6 = 1830 (void *)(mtod(m0, char *) + offset); 1831 th = (void *)(mtod(m0, char *) + hlen); 1832 1833 ip6->ip6_plen = 0; 1834 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1835 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1836 } 1837 hlen += th->th_off << 2; 1838 } 1839 1840 if (v4) { 1841 WM_EVCNT_INCR(&sc->sc_ev_txtso); 1842 cmdlen |= WTX_TCPIP_CMD_IP; 1843 } else { 1844 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 1845 ipcse = 0; 1846 } 1847 cmd |= WTX_TCPIP_CMD_TSE; 1848 cmdlen |= WTX_TCPIP_CMD_TSE | 1849 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1850 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1851 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1852 } 1853 1854 /* 1855 * NOTE: Even if we're not using the IP or 
TCP/UDP checksum 1856 * offload feature, if we load the context descriptor, we 1857 * MUST provide valid values for IPCSS and TUCSS fields. 1858 */ 1859 1860 ipcs = WTX_TCPIP_IPCSS(offset) | 1861 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1862 WTX_TCPIP_IPCSE(ipcse); 1863 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1864 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1865 fields |= WTX_IXSM; 1866 } 1867 1868 offset += iphl; 1869 1870 if (m0->m_pkthdr.csum_flags & 1871 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1872 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1873 fields |= WTX_TXSM; 1874 tucs = WTX_TCPIP_TUCSS(offset) | 1875 WTX_TCPIP_TUCSO(offset + 1876 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1877 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1878 } else if ((m0->m_pkthdr.csum_flags & 1879 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 1880 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1881 fields |= WTX_TXSM; 1882 tucs = WTX_TCPIP_TUCSS(offset) | 1883 WTX_TCPIP_TUCSO(offset + 1884 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1885 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1886 } else { 1887 /* Just initialize it to a valid TCP context. */ 1888 tucs = WTX_TCPIP_TUCSS(offset) | 1889 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1890 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1891 } 1892 1893 /* Fill in the context descriptor. 
*/
	/*
	 * Write the assembled context descriptor into the ring and
	 * sync it to the device before advancing the producer index.
	 */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_dump_mbuf_chain:
 *
 *	Log (at LOG_DEBUG) the data pointer, length, and flags of each
 *	mbuf in the chain headed by m0, followed by a count of mbufs.
 *	Used when a Tx packet is dropped for consuming too many DMA
 *	segments.
 */
static void
wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
{
	struct mbuf *m;
	int i;

	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
		    m->m_data, m->m_len, m->m_flags);
	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
	    i, i == 1 ? "" : "s");
}

/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * The FIFO has drained when the descriptor tail has caught
		 * up with the head AND the internal FIFO tail/head (and
		 * their saved copies) agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.  The order here matters: the
			 * FIFO pointers may only be rewritten while the
			 * transmitter is disabled.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}

/*
 * wm_82547_txfifo_bugchk:
 *
 *	Check for bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
 *
 *	We do this by checking the amount of space before the end
 *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
 *	the Tx FIFO, wait for all remaining packets to drain, reset
 *	the internal FIFO pointers to the beginning, and restart
 *	transmission on the interface.
 *
 *	Returns non-zero if the caller must hold off transmission
 *	(FIFO stalled), zero if the packet may be sent; on success the
 *	software FIFO-head accounting is advanced by the padded length.
 */
#define	WM_FIFO_HDR		0x10
#define	WM_82547_PAD_LEN	0x3e0
static int
wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
{
	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
	/* Account for the per-packet FIFO header, rounded up. */
	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);

	/* Just return if already stalled. */
	if (sc->sc_txfifo_stall)
		return (1);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Stall only occurs in half-duplex mode. */
		goto send_packet;
	}

	if (len >= WM_82547_PAD_LEN + space) {
		/* Would wrap; stall and wait for the FIFO to drain. */
		sc->sc_txfifo_stall = 1;
		callout_schedule(&sc->sc_txfifo_ch, 1);
		return (1);
	}

 send_packet:
	sc->sc_txfifo_head += len;
	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
		sc->sc_txfifo_head -= sc->sc_txfifo_size;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry.
*/
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Low on job slots; reap completed jobs first. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				/* Unfixable: dequeue, log, and drop. */
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.  The
			 * packet stays on the send queue for a retry.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Interrupt delay + insert FCS on every descriptor. */
		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
*/
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.  Sweeps completed transmissions first
 *	(we use delayed interrupts), and only if descriptors are still
 *	outstanding reports a device timeout and reinitializes the chip.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC(sc)) {
		log(LOG_ERR,
		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 *	Runs at splnet(); returns 0 or an errno.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Copy the new link-level address into if_dl. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)),
			    ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/* Fall through for rest */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET: something relevant to the hardware changed. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
*/
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/* Loop until the chip reports no more causes we care about. */
	while (1 /* CONSTCOND */) {
		/* Reading ICR acknowledges the pending causes. */
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx are swept unconditionally on every pass. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receive overrun: count it as an input error. */
			ifp->if_ierrors++;
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.  Reaps completed transmit
 *	jobs, returning their descriptors and mbufs, and clears
 *	IFF_OACTIVE so the start routine may run again.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Done only when the last descriptor reports DD. */
		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* Excessive collisions == 16 attempts. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Return the descriptors and free the mbuf chain. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
*/
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the receive ring from
 *	sc_rxptr, reassembling multi-descriptor packets via the
 *	sc_rxhead/sc_rxtail chain, trimming the FCS, and passing
 *	completed packets up the stack.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			/* Discarding the remainder of a failed packet. */
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero.  Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 */
		if (m->m_len < ETHER_CRC_LEN) {
			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
			m->m_len = 0;
		} else {
			m->m_len -= ETHER_CRC_LEN;
		}
		len = sc->sc_rxlen - ETHER_CRC_LEN;

		WM_RXCHAIN_LINK(sc, m);

		/* Detach the assembled chain and reset for the next one. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer.
*/
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.  For devices with an MII PHY
 *	this just kicks the normal MII tick path (forcing MAC speed and
 *	duplex on the 82543); for TBI (fiber) devices it tracks link
 *	state from the STATUS register and updates collision distance
 *	and flow-control settings accordingly.
 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
	    __func__));
	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    device_xname(sc->sc_dev)));
			mii_tick(&sc->sc_mii);
			if (sc->sc_type == WM_T_82543) {
				int miistatus, active;

				/*
				 * With 82543, we need to force speed and
				 * duplex on the MAC equal to what the PHY
				 * speed and duplex configuration is.
				 */
				miistatus = sc->sc_mii.mii_media_status;

				if (miistatus & IFM_ACTIVE) {
					active = sc->sc_mii.mii_media_active;
					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
					    | CTRL_FD);
					switch (IFM_SUBTYPE(active)) {
					case IFM_10_T:
						sc->sc_ctrl |= CTRL_SPEED_10;
						break;
					case IFM_100_TX:
						sc->sc_ctrl |= CTRL_SPEED_100;
						break;
					case IFM_1000_T:
						sc->sc_ctrl |= CTRL_SPEED_1000;
						break;
					default:
						/*
						 * fiber?
						 * Should not enter here.
						 */
						printf("unknown media (%x)\n",
						    active);
						break;
					}
					if (active & IFM_FDX)
						sc->sc_ctrl |= CTRL_FD;
					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				}
			}
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    device_xname(sc->sc_dev)));
		}
		return;
	}

	/* TBI (fiber) path. */
	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL,
			    sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Receiving /C/ ordered sets: autonegotiation activity. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
2910 */ 2911 static void 2912 wm_tick(void *arg) 2913 { 2914 struct wm_softc *sc = arg; 2915 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2916 int s; 2917 2918 s = splnet(); 2919 2920 if (sc->sc_type >= WM_T_82542_2_1) { 2921 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2922 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2923 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2924 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2925 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2926 } 2927 2928 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 2929 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 2930 2931 2932 if (sc->sc_flags & WM_F_HAS_MII) 2933 mii_tick(&sc->sc_mii); 2934 else 2935 wm_tbi_check_link(sc); 2936 2937 splx(s); 2938 2939 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2940 } 2941 2942 /* 2943 * wm_reset: 2944 * 2945 * Reset the i82542 chip. 2946 */ 2947 static void 2948 wm_reset(struct wm_softc *sc) 2949 { 2950 uint32_t reg; 2951 2952 /* 2953 * Allocate on-chip memory according to the MTU size. 2954 * The Packet Buffer Allocation register must be written 2955 * before the chip is reset. 2956 */ 2957 switch (sc->sc_type) { 2958 case WM_T_82547: 2959 case WM_T_82547_2: 2960 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2961 PBA_22K : PBA_30K; 2962 sc->sc_txfifo_head = 0; 2963 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 2964 sc->sc_txfifo_size = 2965 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 2966 sc->sc_txfifo_stall = 0; 2967 break; 2968 case WM_T_82571: 2969 case WM_T_82572: 2970 case WM_T_80003: 2971 sc->sc_pba = PBA_32K; 2972 break; 2973 case WM_T_82573: 2974 case WM_T_82574: 2975 sc->sc_pba = PBA_12K; 2976 break; 2977 case WM_T_ICH8: 2978 sc->sc_pba = PBA_8K; 2979 CSR_WRITE(sc, WMREG_PBS, PBA_16K); 2980 break; 2981 case WM_T_ICH9: 2982 case WM_T_ICH10: 2983 sc->sc_pba = PBA_10K; 2984 break; 2985 default: 2986 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 
2987 PBA_40K : PBA_48K; 2988 break; 2989 } 2990 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 2991 2992 if (sc->sc_flags & WM_F_PCIE) { 2993 int timeout = 800; 2994 2995 sc->sc_ctrl |= CTRL_GIO_M_DIS; 2996 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2997 2998 while (timeout) { 2999 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0) 3000 break; 3001 delay(100); 3002 } 3003 } 3004 3005 /* clear interrupt */ 3006 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3007 3008 /* 3009 * 82541 Errata 29? & 82547 Errata 28? 3010 * See also the description about PHY_RST bit in CTRL register 3011 * in 8254x_GBe_SDM.pdf. 3012 */ 3013 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { 3014 CSR_WRITE(sc, WMREG_CTRL, 3015 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); 3016 delay(5000); 3017 } 3018 3019 switch (sc->sc_type) { 3020 case WM_T_82544: 3021 case WM_T_82540: 3022 case WM_T_82545: 3023 case WM_T_82546: 3024 case WM_T_82541: 3025 case WM_T_82541_2: 3026 /* 3027 * On some chipsets, a reset through a memory-mapped write 3028 * cycle can cause the chip to reset before completing the 3029 * write cycle. This causes major headache that can be 3030 * avoided by issuing the reset via indirect register writes 3031 * through I/O space. 3032 * 3033 * So, if we successfully mapped the I/O BAR at attach time, 3034 * use that. Otherwise, try our luck with a memory-mapped 3035 * reset. 3036 */ 3037 if (sc->sc_flags & WM_F_IOH_VALID) 3038 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 3039 else 3040 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 3041 break; 3042 3043 case WM_T_82545_3: 3044 case WM_T_82546_3: 3045 /* Use the shadow control register on these chips. */ 3046 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 3047 break; 3048 3049 case WM_T_ICH8: 3050 case WM_T_ICH9: 3051 case WM_T_ICH10: 3052 wm_get_swfwhw_semaphore(sc); 3053 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET); 3054 delay(10000); 3055 3056 default: 3057 /* Everything else can safely use the documented method. 
 */
		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	}
	/* Let the reset complete before touching the chip again. */
	delay(10000);

	/* reload EEPROM */
	switch(sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Older chips: pulse CTRL_EXT_EE_RST to force an EEPROM reload. */
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/* These reload on their own; just wait it out. */
		delay(20000);
		break;
	case WM_T_82573:
	case WM_T_82574:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* FALLTHROUGH */
	default:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
	}

	/* reload sc_ctrl: the reset may have changed CTRL bits under us */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

#if 0
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
			return;
		}
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		log(LOG_ERR, "%s: reset failed to complete\n",
		    device_xname(sc->sc_dev));
#endif
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Resets the chip, (re)initializes the Tx/Rx descriptor rings and
 *	software state, programs flow control, checksum offload, interrupt
 *	and media registers, and marks the interface IFF_RUNNING.
 *	Returns 0 on success or an errno value on failure.
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset (reset clears the hw counters) */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/*
		 * Take ownership of the hardware from the management
		 * firmware if manageability mode is active.
		 */
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	if (sc->sc_type < WM_T_82543) {
		/* Pre-82543 parts use the "old" register layout. */
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* Second Rx ring is unused on these chips. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	/* TCP/UDP offload requires IP offload to be enabled as well. */
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256).  Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* 82573 doesn't support jumbo frame */
	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
	    sc->sc_type != WM_T_ICH8)
		sc->sc_rctl |= RCTL_LPE;

	/* Rx buffer size follows the mbuf cluster size. */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return (error);
}

/*
 * wm_rxdrain:
 *
 *	Drain the receive queue: unload and free every mbuf still
 *	attached to an Rx job descriptor.
 */
static void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.  If `disable' is set,
 *	the receive queue is drained as well.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		wm_rxdrain(sc);
}

/*
 * wm_get_auto_rd_done:
 *
 *	Wait for the EEPROM auto-read started by a chip reset to
 *	complete.  On chips without the EECD_EE_AUTORD indication we
 *	simply wait a fixed delay.
 */
void
wm_get_auto_rd_done(struct wm_softc *sc)
{
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* Poll up to 10ms for the auto-read-done bit. */
		for (i = 10; i > 0; i--) {
			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
				break;
			delay(1000);
		}
		if (i == 0) {
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", device_xname(sc->sc_dev));
		}
		break;
	default:
		delay(5000);
		break;
	}

	/* Phy configuration starts after EECD_AUTO_RD is set */
	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
		delay(25000);
}

/*
 * wm_acquire_eeprom:
 *
 *	Perform the EEPROM handshake required on some chips.
3604 */ 3605 static int 3606 wm_acquire_eeprom(struct wm_softc *sc) 3607 { 3608 uint32_t reg; 3609 int x; 3610 int ret = 0; 3611 3612 /* always success */ 3613 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3614 return 0; 3615 3616 if (sc->sc_flags & WM_F_SWFWHW_SYNC) { 3617 ret = wm_get_swfwhw_semaphore(sc); 3618 } else if (sc->sc_flags & WM_F_SWFW_SYNC) { 3619 /* this will also do wm_get_swsm_semaphore() if needed */ 3620 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 3621 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 3622 ret = wm_get_swsm_semaphore(sc); 3623 } 3624 3625 if (ret) { 3626 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 3627 __func__); 3628 return 1; 3629 } 3630 3631 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3632 reg = CSR_READ(sc, WMREG_EECD); 3633 3634 /* Request EEPROM access. */ 3635 reg |= EECD_EE_REQ; 3636 CSR_WRITE(sc, WMREG_EECD, reg); 3637 3638 /* ..and wait for it to be granted. */ 3639 for (x = 0; x < 1000; x++) { 3640 reg = CSR_READ(sc, WMREG_EECD); 3641 if (reg & EECD_EE_GNT) 3642 break; 3643 delay(5); 3644 } 3645 if ((reg & EECD_EE_GNT) == 0) { 3646 aprint_error_dev(sc->sc_dev, 3647 "could not acquire EEPROM GNT\n"); 3648 reg &= ~EECD_EE_REQ; 3649 CSR_WRITE(sc, WMREG_EECD, reg); 3650 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 3651 wm_put_swfwhw_semaphore(sc); 3652 if (sc->sc_flags & WM_F_SWFW_SYNC) 3653 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3654 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3655 wm_put_swsm_semaphore(sc); 3656 return (1); 3657 } 3658 } 3659 3660 return (0); 3661 } 3662 3663 /* 3664 * wm_release_eeprom: 3665 * 3666 * Release the EEPROM mutex. 
3667 */ 3668 static void 3669 wm_release_eeprom(struct wm_softc *sc) 3670 { 3671 uint32_t reg; 3672 3673 /* always success */ 3674 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3675 return; 3676 3677 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3678 reg = CSR_READ(sc, WMREG_EECD); 3679 reg &= ~EECD_EE_REQ; 3680 CSR_WRITE(sc, WMREG_EECD, reg); 3681 } 3682 3683 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 3684 wm_put_swfwhw_semaphore(sc); 3685 if (sc->sc_flags & WM_F_SWFW_SYNC) 3686 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3687 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3688 wm_put_swsm_semaphore(sc); 3689 } 3690 3691 /* 3692 * wm_eeprom_sendbits: 3693 * 3694 * Send a series of bits to the EEPROM. 3695 */ 3696 static void 3697 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 3698 { 3699 uint32_t reg; 3700 int x; 3701 3702 reg = CSR_READ(sc, WMREG_EECD); 3703 3704 for (x = nbits; x > 0; x--) { 3705 if (bits & (1U << (x - 1))) 3706 reg |= EECD_DI; 3707 else 3708 reg &= ~EECD_DI; 3709 CSR_WRITE(sc, WMREG_EECD, reg); 3710 delay(2); 3711 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3712 delay(2); 3713 CSR_WRITE(sc, WMREG_EECD, reg); 3714 delay(2); 3715 } 3716 } 3717 3718 /* 3719 * wm_eeprom_recvbits: 3720 * 3721 * Receive a series of bits from the EEPROM. 3722 */ 3723 static void 3724 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 3725 { 3726 uint32_t reg, val; 3727 int x; 3728 3729 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 3730 3731 val = 0; 3732 for (x = nbits; x > 0; x--) { 3733 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3734 delay(2); 3735 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 3736 val |= (1U << (x - 1)); 3737 CSR_WRITE(sc, WMREG_EECD, reg); 3738 delay(2); 3739 } 3740 *valp = val; 3741 } 3742 3743 /* 3744 * wm_read_eeprom_uwire: 3745 * 3746 * Read a word from the EEPROM using the MicroWire protocol. 
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/* One full READ transaction (command, address, data) per word. */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}

/*
 * wm_spi_eeprom_ready:
 *
 *	Wait for a SPI EEPROM to be ready for commands, polling the
 *	status register until the busy bit clears.  Returns 0 when
 *	ready, 1 on timeout.
 */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
		return (1);
	}
	return (0);
}

/*
 * wm_read_eeprom_spi:
 *
 *	Read a word from the EEPROM using the SPI protocol.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands.
 */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	opc = SPI_OPC_READ;
	/* 8-bit-address parts encode address bit 8 in the opcode. */
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* SPI addresses are byte offsets; words are 2 bytes, hence << 1. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		/* Data arrives big-endian; byte-swap into host order. */
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}

#define EEPROM_CHECKSUM		0xBABA
#define EEPROM_SIZE		0x0040

/*
 * wm_validate_eeprom_checksum:
 *
 *	The checksum is defined as the sum of the first 64 (16 bit) words,
 *	which must equal EEPROM_CHECKSUM (0xBABA).  Returns 0 if valid,
 *	1 on read failure or mismatch.
 */
static int
wm_validate_eeprom_checksum(struct wm_softc *sc)
{
	uint16_t checksum;
	uint16_t eeprom_data;
	int i;

	checksum = 0;

	for (i = 0; i < EEPROM_SIZE; i++) {
		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
			return 1;
		/* 16-bit wraparound arithmetic is intentional here. */
		checksum += eeprom_data;
	}

	if (checksum != (uint16_t) EEPROM_CHECKSUM)
		return 1;

	return 0;
}

/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	if (wm_acquire_eeprom(sc))
		return 1;

	/* Dispatch to the access method this chip supports. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return rv;
}

/*
 * wm_read_eeprom_eerd:
 *
 *	Read words from the EEPROM through the EERD register interface.
 *	Returns 0 on success, non-zero if a read fails to complete.
 */
static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return error;
}

/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the given EERD/EEWR register for the DONE bit.
 *	Returns 0 when done, -1 on timeout (~500ms worst case).
 */
static int
wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
{
	uint32_t attempts = 100000;
	uint32_t i, reg = 0;
	int32_t done = -1;

	for (i = 0; i < attempts; i++) {
		reg = CSR_READ(sc, rw);

		if (reg & EERD_DONE) {
			done = 0;
			break;
		}
		delay(5);
	}

	return done;
}

/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* No cluster available; free the header and give up. */
		m_freem(m);
		return (ENOBUFS);
	}

	/* Replacing an existing buffer: unload its old DMA mapping first. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.  A NULL enaddr
 *	clears the slot.
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		/* Pack the 6-byte address little-endian into two registers. */
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;	/* mark the entry valid */
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}

/*
 * wm_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
{
	/* Shift amounts indexed by sc_mchash_type (filter offset type). */
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
	uint32_t hash;

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		/* ICH parts have a smaller (1024-bit) filter table. */
		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
		return (hash & 0x3ff);
	}
	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}

/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous bits, the
 *	receive address list, and the multicast hash table, then
 *	program RCTL.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Upper bits select the 32-bit table word, low 5 the bit. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			/*
			 * Work around by re-writing the previous table
			 * word after updating this one.
			 */
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}

/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Advertise the fixed set of 1000BASE-X media. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl, status;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	status = CSR_READ(sc, WMREG_STATUS);
	if ((status & STATUS_LU) == 0) {
		/* No link. */
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	/* Report the flow-control state the hardware resolved. */
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}

/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 *	Always returns 0; link state is reflected in sc_tbi_linkup.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = 0;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Reprogram collision distance for resolved duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				  WMREG_OLD_FCRTL : WMREG_FCRTL,
				  sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}

/*
 * wm_tbi_set_linkled:
 *
 *	Update the link LED on 1000BASE-X devices.
 */
static void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	/* 82540 or newer devices are active low */
	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}

/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ?
"FDX" : "HDX")); 4407 sc->sc_tbi_linkup = 1; 4408 } 4409 4410 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) 4411 && ((status & STATUS_LU) == 0)) { 4412 sc->sc_tbi_linkup = 0; 4413 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) { 4414 /* RXCFG storm! */ 4415 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n", 4416 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg)); 4417 wm_init(ifp); 4418 wm_start(ifp); 4419 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 4420 /* If the timer expired, retry autonegotiation */ 4421 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) { 4422 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n")); 4423 sc->sc_tbi_ticks = 0; 4424 /* 4425 * Reset the link, and let autonegotiation do 4426 * its thing 4427 */ 4428 sc->sc_ctrl |= CTRL_LRST; 4429 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4430 delay(1000); 4431 sc->sc_ctrl &= ~CTRL_LRST; 4432 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4433 delay(1000); 4434 CSR_WRITE(sc, WMREG_TXCW, 4435 sc->sc_txcw & ~TXCW_ANE); 4436 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 4437 } 4438 } 4439 } 4440 4441 wm_tbi_set_linkled(sc); 4442 } 4443 4444 /* 4445 * wm_gmii_reset: 4446 * 4447 * Reset the PHY. 4448 */ 4449 static void 4450 wm_gmii_reset(struct wm_softc *sc) 4451 { 4452 uint32_t reg; 4453 int func = 0; /* XXX gcc */ 4454 4455 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4456 || (sc->sc_type == WM_T_ICH10)) { 4457 if (wm_get_swfwhw_semaphore(sc)) { 4458 aprint_error_dev(sc->sc_dev, 4459 "%s: failed to get semaphore\n", __func__); 4460 return; 4461 } 4462 } 4463 if (sc->sc_type == WM_T_80003) { 4464 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1; 4465 if (wm_get_swfw_semaphore(sc, 4466 func ? 
SWFW_PHY1_SM : SWFW_PHY0_SM)) { 4467 aprint_error_dev(sc->sc_dev, 4468 "%s: failed to get semaphore\n", __func__); 4469 return; 4470 } 4471 } 4472 if (sc->sc_type >= WM_T_82544) { 4473 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 4474 delay(20000); 4475 4476 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4477 delay(20000); 4478 } else { 4479 /* 4480 * With 82543, we need to force speed and duplex on the MAC 4481 * equal to what the PHY speed and duplex configuration is. 4482 * In addition, we need to perform a hardware reset on the PHY 4483 * to take it out of reset. 4484 */ 4485 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 4486 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4487 4488 /* The PHY reset pin is active-low. */ 4489 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4490 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 4491 CTRL_EXT_SWDPIN(4)); 4492 reg |= CTRL_EXT_SWDPIO(4); 4493 4494 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 4495 delay(10); 4496 4497 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4498 delay(10000); 4499 4500 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 4501 delay(10); 4502 #if 0 4503 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 4504 #endif 4505 } 4506 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4507 || (sc->sc_type == WM_T_ICH10)) 4508 wm_put_swfwhw_semaphore(sc); 4509 if (sc->sc_type == WM_T_80003) 4510 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM); 4511 } 4512 4513 /* 4514 * wm_gmii_mediainit: 4515 * 4516 * Initialize media for use on 1000BASE-T devices. 4517 */ 4518 static void 4519 wm_gmii_mediainit(struct wm_softc *sc) 4520 { 4521 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 4522 4523 /* We have MII. */ 4524 sc->sc_flags |= WM_F_HAS_MII; 4525 4526 if (sc->sc_type == WM_T_80003) 4527 sc->sc_tipg = TIPG_1000T_80003_DFLT; 4528 else 4529 sc->sc_tipg = TIPG_1000T_DFLT; 4530 4531 /* 4532 * Let the chip set speed/duplex on its own based on 4533 * signals from the PHY. 
4534 * XXXbouyer - I'm not sure this is right for the 80003, 4535 * the em driver only sets CTRL_SLU here - but it seems to work. 4536 */ 4537 sc->sc_ctrl |= CTRL_SLU; 4538 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4539 4540 /* Initialize our media structures and probe the GMII. */ 4541 sc->sc_mii.mii_ifp = ifp; 4542 4543 if (sc->sc_type == WM_T_ICH10) { 4544 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg; 4545 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg; 4546 } else if (sc->sc_type >= WM_T_80003) { 4547 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg; 4548 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg; 4549 } else if (sc->sc_type >= WM_T_82544) { 4550 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg; 4551 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg; 4552 } else { 4553 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg; 4554 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg; 4555 } 4556 sc->sc_mii.mii_statchg = wm_gmii_statchg; 4557 4558 wm_gmii_reset(sc); 4559 4560 sc->sc_ethercom.ec_mii = &sc->sc_mii; 4561 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange, 4562 wm_gmii_mediastatus); 4563 4564 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 4565 MII_OFFSET_ANY, MIIF_DOPAUSE); 4566 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 4567 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 4568 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 4569 } else 4570 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 4571 } 4572 4573 /* 4574 * wm_gmii_mediastatus: [ifmedia interface function] 4575 * 4576 * Get the current interface media status on a 1000BASE-T device. 
4577 */ 4578 static void 4579 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 4580 { 4581 struct wm_softc *sc = ifp->if_softc; 4582 4583 ether_mediastatus(ifp, ifmr); 4584 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) | 4585 sc->sc_flowflags; 4586 } 4587 4588 /* 4589 * wm_gmii_mediachange: [ifmedia interface function] 4590 * 4591 * Set hardware to newly-selected media on a 1000BASE-T device. 4592 */ 4593 static int 4594 wm_gmii_mediachange(struct ifnet *ifp) 4595 { 4596 struct wm_softc *sc = ifp->if_softc; 4597 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 4598 int rc; 4599 4600 if ((ifp->if_flags & IFF_UP) == 0) 4601 return 0; 4602 4603 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 4604 sc->sc_ctrl |= CTRL_SLU; 4605 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 4606 || (sc->sc_type > WM_T_82543)) { 4607 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); 4608 } else { 4609 sc->sc_ctrl &= ~CTRL_ASDE; 4610 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 4611 if (ife->ifm_media & IFM_FDX) 4612 sc->sc_ctrl |= CTRL_FD; 4613 switch(IFM_SUBTYPE(ife->ifm_media)) { 4614 case IFM_10_T: 4615 sc->sc_ctrl |= CTRL_SPEED_10; 4616 break; 4617 case IFM_100_TX: 4618 sc->sc_ctrl |= CTRL_SPEED_100; 4619 break; 4620 case IFM_1000_T: 4621 sc->sc_ctrl |= CTRL_SPEED_1000; 4622 break; 4623 default: 4624 panic("wm_gmii_mediachange: bad media 0x%x", 4625 ife->ifm_media); 4626 } 4627 } 4628 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4629 if (sc->sc_type <= WM_T_82543) 4630 wm_gmii_reset(sc); 4631 4632 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 4633 return 0; 4634 return rc; 4635 } 4636 4637 #define MDI_IO CTRL_SWDPIN(2) 4638 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 4639 #define MDI_CLK CTRL_SWDPIN(3) 4640 4641 static void 4642 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 4643 { 4644 uint32_t i, v; 4645 4646 v = CSR_READ(sc, WMREG_CTRL); 4647 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 4648 v |= MDI_DIR | 
CTRL_SWDPIO(3); 4649 4650 for (i = 1 << (nbits - 1); i != 0; i >>= 1) { 4651 if (data & i) 4652 v |= MDI_IO; 4653 else 4654 v &= ~MDI_IO; 4655 CSR_WRITE(sc, WMREG_CTRL, v); 4656 delay(10); 4657 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 4658 delay(10); 4659 CSR_WRITE(sc, WMREG_CTRL, v); 4660 delay(10); 4661 } 4662 } 4663 4664 static uint32_t 4665 i82543_mii_recvbits(struct wm_softc *sc) 4666 { 4667 uint32_t v, i, data = 0; 4668 4669 v = CSR_READ(sc, WMREG_CTRL); 4670 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 4671 v |= CTRL_SWDPIO(3); 4672 4673 CSR_WRITE(sc, WMREG_CTRL, v); 4674 delay(10); 4675 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 4676 delay(10); 4677 CSR_WRITE(sc, WMREG_CTRL, v); 4678 delay(10); 4679 4680 for (i = 0; i < 16; i++) { 4681 data <<= 1; 4682 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 4683 delay(10); 4684 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 4685 data |= 1; 4686 CSR_WRITE(sc, WMREG_CTRL, v); 4687 delay(10); 4688 } 4689 4690 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 4691 delay(10); 4692 CSR_WRITE(sc, WMREG_CTRL, v); 4693 delay(10); 4694 4695 return (data); 4696 } 4697 4698 #undef MDI_IO 4699 #undef MDI_DIR 4700 #undef MDI_CLK 4701 4702 /* 4703 * wm_gmii_i82543_readreg: [mii interface function] 4704 * 4705 * Read a PHY register on the GMII (i82543 version). 4706 */ 4707 static int 4708 wm_gmii_i82543_readreg(device_t self, int phy, int reg) 4709 { 4710 struct wm_softc *sc = device_private(self); 4711 int rv; 4712 4713 i82543_mii_sendbits(sc, 0xffffffffU, 32); 4714 i82543_mii_sendbits(sc, reg | (phy << 5) | 4715 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 4716 rv = i82543_mii_recvbits(sc) & 0xffff; 4717 4718 DPRINTF(WM_DEBUG_GMII, 4719 ("%s: GMII: read phy %d reg %d -> 0x%04x\n", 4720 device_xname(sc->sc_dev), phy, reg, rv)); 4721 4722 return (rv); 4723 } 4724 4725 /* 4726 * wm_gmii_i82543_writereg: [mii interface function] 4727 * 4728 * Write a PHY register on the GMII (i82543 version). 
4729 */ 4730 static void 4731 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val) 4732 { 4733 struct wm_softc *sc = device_private(self); 4734 4735 i82543_mii_sendbits(sc, 0xffffffffU, 32); 4736 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 4737 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 4738 (MII_COMMAND_START << 30), 32); 4739 } 4740 4741 /* 4742 * wm_gmii_i82544_readreg: [mii interface function] 4743 * 4744 * Read a PHY register on the GMII. 4745 */ 4746 static int 4747 wm_gmii_i82544_readreg(device_t self, int phy, int reg) 4748 { 4749 struct wm_softc *sc = device_private(self); 4750 uint32_t mdic = 0; 4751 int i, rv; 4752 4753 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 4754 MDIC_REGADD(reg)); 4755 4756 for (i = 0; i < 320; i++) { 4757 mdic = CSR_READ(sc, WMREG_MDIC); 4758 if (mdic & MDIC_READY) 4759 break; 4760 delay(10); 4761 } 4762 4763 if ((mdic & MDIC_READY) == 0) { 4764 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n", 4765 device_xname(sc->sc_dev), phy, reg); 4766 rv = 0; 4767 } else if (mdic & MDIC_E) { 4768 #if 0 /* This is normal if no PHY is present. */ 4769 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n", 4770 device_xname(sc->sc_dev), phy, reg); 4771 #endif 4772 rv = 0; 4773 } else { 4774 rv = MDIC_DATA(mdic); 4775 if (rv == 0xffff) 4776 rv = 0; 4777 } 4778 4779 return (rv); 4780 } 4781 4782 /* 4783 * wm_gmii_i82544_writereg: [mii interface function] 4784 * 4785 * Write a PHY register on the GMII. 
4786 */ 4787 static void 4788 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val) 4789 { 4790 struct wm_softc *sc = device_private(self); 4791 uint32_t mdic = 0; 4792 int i; 4793 4794 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | 4795 MDIC_REGADD(reg) | MDIC_DATA(val)); 4796 4797 for (i = 0; i < 320; i++) { 4798 mdic = CSR_READ(sc, WMREG_MDIC); 4799 if (mdic & MDIC_READY) 4800 break; 4801 delay(10); 4802 } 4803 4804 if ((mdic & MDIC_READY) == 0) 4805 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n", 4806 device_xname(sc->sc_dev), phy, reg); 4807 else if (mdic & MDIC_E) 4808 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n", 4809 device_xname(sc->sc_dev), phy, reg); 4810 } 4811 4812 /* 4813 * wm_gmii_i80003_readreg: [mii interface function] 4814 * 4815 * Read a PHY register on the kumeran 4816 * This could be handled by the PHY layer if we didn't have to lock the 4817 * ressource ... 4818 */ 4819 static int 4820 wm_gmii_i80003_readreg(device_t self, int phy, int reg) 4821 { 4822 struct wm_softc *sc = device_private(self); 4823 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1); 4824 int rv; 4825 4826 if (phy != 1) /* only one PHY on kumeran bus */ 4827 return 0; 4828 4829 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) { 4830 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 4831 __func__); 4832 return 0; 4833 } 4834 4835 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 4836 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 4837 reg >> GG82563_PAGE_SHIFT); 4838 } else { 4839 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 4840 reg >> GG82563_PAGE_SHIFT); 4841 } 4842 /* Wait more 200us for a bug of the ready bit in the MDIC register */ 4843 delay(200); 4844 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 4845 delay(200); 4846 4847 wm_put_swfw_semaphore(sc, func ? 
SWFW_PHY1_SM : SWFW_PHY0_SM); 4848 return (rv); 4849 } 4850 4851 /* 4852 * wm_gmii_i80003_writereg: [mii interface function] 4853 * 4854 * Write a PHY register on the kumeran. 4855 * This could be handled by the PHY layer if we didn't have to lock the 4856 * ressource ... 4857 */ 4858 static void 4859 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val) 4860 { 4861 struct wm_softc *sc = device_private(self); 4862 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1); 4863 4864 if (phy != 1) /* only one PHY on kumeran bus */ 4865 return; 4866 4867 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) { 4868 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 4869 __func__); 4870 return; 4871 } 4872 4873 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 4874 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 4875 reg >> GG82563_PAGE_SHIFT); 4876 } else { 4877 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT, 4878 reg >> GG82563_PAGE_SHIFT); 4879 } 4880 /* Wait more 200us for a bug of the ready bit in the MDIC register */ 4881 delay(200); 4882 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 4883 delay(200); 4884 4885 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM); 4886 } 4887 4888 /* 4889 * wm_gmii_bm_readreg: [mii interface function] 4890 * 4891 * Read a PHY register on the kumeran 4892 * This could be handled by the PHY layer if we didn't have to lock the 4893 * ressource ... 4894 */ 4895 static int 4896 wm_gmii_bm_readreg(device_t self, int phy, int reg) 4897 { 4898 struct wm_softc *sc = device_private(self); 4899 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1); 4900 int rv; 4901 4902 if (wm_get_swfw_semaphore(sc, func ? 
SWFW_PHY1_SM : SWFW_PHY0_SM)) { 4903 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 4904 __func__); 4905 return 0; 4906 } 4907 4908 if (reg > GG82563_MAX_REG_ADDRESS) { 4909 if (phy == 1) 4910 wm_gmii_i82544_writereg(self, phy, 0x1f, 4911 reg); 4912 else 4913 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 4914 reg >> GG82563_PAGE_SHIFT); 4915 4916 } 4917 4918 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS); 4919 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM); 4920 return (rv); 4921 } 4922 4923 /* 4924 * wm_gmii_bm_writereg: [mii interface function] 4925 * 4926 * Write a PHY register on the kumeran. 4927 * This could be handled by the PHY layer if we didn't have to lock the 4928 * ressource ... 4929 */ 4930 static void 4931 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val) 4932 { 4933 struct wm_softc *sc = device_private(self); 4934 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1); 4935 4936 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) { 4937 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 4938 __func__); 4939 return; 4940 } 4941 4942 if (reg > GG82563_MAX_REG_ADDRESS) { 4943 if (phy == 1) 4944 wm_gmii_i82544_writereg(self, phy, 0x1f, 4945 reg); 4946 else 4947 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT, 4948 reg >> GG82563_PAGE_SHIFT); 4949 4950 } 4951 4952 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val); 4953 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM); 4954 } 4955 4956 /* 4957 * wm_gmii_statchg: [mii interface function] 4958 * 4959 * Callback from MII layer when media changes. 
4960 */ 4961 static void 4962 wm_gmii_statchg(device_t self) 4963 { 4964 struct wm_softc *sc = device_private(self); 4965 struct mii_data *mii = &sc->sc_mii; 4966 4967 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 4968 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 4969 sc->sc_fcrtl &= ~FCRTL_XONE; 4970 4971 /* 4972 * Get flow control negotiation result. 4973 */ 4974 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 4975 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 4976 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 4977 mii->mii_media_active &= ~IFM_ETH_FMASK; 4978 } 4979 4980 if (sc->sc_flowflags & IFM_FLOW) { 4981 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { 4982 sc->sc_ctrl |= CTRL_TFCE; 4983 sc->sc_fcrtl |= FCRTL_XONE; 4984 } 4985 if (sc->sc_flowflags & IFM_ETH_RXPAUSE) 4986 sc->sc_ctrl |= CTRL_RFCE; 4987 } 4988 4989 if (sc->sc_mii.mii_media_active & IFM_FDX) { 4990 DPRINTF(WM_DEBUG_LINK, 4991 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev))); 4992 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4993 } else { 4994 DPRINTF(WM_DEBUG_LINK, 4995 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev))); 4996 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 4997 } 4998 4999 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5000 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 5001 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 
WMREG_OLD_FCRTL 5002 : WMREG_FCRTL, sc->sc_fcrtl); 5003 if (sc->sc_type == WM_T_80003) { 5004 switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 5005 case IFM_1000_T: 5006 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, 5007 KUMCTRLSTA_HD_CTRL_1000_DEFAULT); 5008 sc->sc_tipg = TIPG_1000T_80003_DFLT; 5009 break; 5010 default: 5011 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, 5012 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT); 5013 sc->sc_tipg = TIPG_10_100_80003_DFLT; 5014 break; 5015 } 5016 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 5017 } 5018 } 5019 5020 /* 5021 * wm_kmrn_readreg: 5022 * 5023 * Read a kumeran register 5024 */ 5025 static int 5026 wm_kmrn_readreg(struct wm_softc *sc, int reg) 5027 { 5028 int rv; 5029 5030 if (sc->sc_flags == WM_F_SWFW_SYNC) { 5031 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) { 5032 aprint_error_dev(sc->sc_dev, 5033 "%s: failed to get semaphore\n", __func__); 5034 return 0; 5035 } 5036 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) { 5037 if (wm_get_swfwhw_semaphore(sc)) { 5038 aprint_error_dev(sc->sc_dev, 5039 "%s: failed to get semaphore\n", __func__); 5040 return 0; 5041 } 5042 } 5043 5044 CSR_WRITE(sc, WMREG_KUMCTRLSTA, 5045 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | 5046 KUMCTRLSTA_REN); 5047 delay(2); 5048 5049 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK; 5050 5051 if (sc->sc_flags == WM_F_SWFW_SYNC) 5052 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 5053 else if (sc->sc_flags == WM_F_SWFWHW_SYNC) 5054 wm_put_swfwhw_semaphore(sc); 5055 5056 return (rv); 5057 } 5058 5059 /* 5060 * wm_kmrn_writereg: 5061 * 5062 * Write a kumeran register 5063 */ 5064 static void 5065 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val) 5066 { 5067 5068 if (sc->sc_flags == WM_F_SWFW_SYNC) { 5069 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) { 5070 aprint_error_dev(sc->sc_dev, 5071 "%s: failed to get semaphore\n", __func__); 5072 return; 5073 } 5074 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) { 5075 if 
(wm_get_swfwhw_semaphore(sc)) { 5076 aprint_error_dev(sc->sc_dev, 5077 "%s: failed to get semaphore\n", __func__); 5078 return; 5079 } 5080 } 5081 5082 CSR_WRITE(sc, WMREG_KUMCTRLSTA, 5083 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | 5084 (val & KUMCTRLSTA_MASK)); 5085 5086 if (sc->sc_flags == WM_F_SWFW_SYNC) 5087 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 5088 else if (sc->sc_flags == WM_F_SWFWHW_SYNC) 5089 wm_put_swfwhw_semaphore(sc); 5090 } 5091 5092 static int 5093 wm_is_onboard_nvm_eeprom(struct wm_softc *sc) 5094 { 5095 uint32_t eecd = 0; 5096 5097 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) { 5098 eecd = CSR_READ(sc, WMREG_EECD); 5099 5100 /* Isolate bits 15 & 16 */ 5101 eecd = ((eecd >> 15) & 0x03); 5102 5103 /* If both bits are set, device is Flash type */ 5104 if (eecd == 0x03) { 5105 return 0; 5106 } 5107 } 5108 return 1; 5109 } 5110 5111 static int 5112 wm_get_swsm_semaphore(struct wm_softc *sc) 5113 { 5114 int32_t timeout; 5115 uint32_t swsm; 5116 5117 /* Get the FW semaphore. */ 5118 timeout = 1000 + 1; /* XXX */ 5119 while (timeout) { 5120 swsm = CSR_READ(sc, WMREG_SWSM); 5121 swsm |= SWSM_SWESMBI; 5122 CSR_WRITE(sc, WMREG_SWSM, swsm); 5123 /* if we managed to set the bit we got the semaphore. 
*/ 5124 swsm = CSR_READ(sc, WMREG_SWSM); 5125 if (swsm & SWSM_SWESMBI) 5126 break; 5127 5128 delay(50); 5129 timeout--; 5130 } 5131 5132 if (timeout == 0) { 5133 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n"); 5134 /* Release semaphores */ 5135 wm_put_swsm_semaphore(sc); 5136 return 1; 5137 } 5138 return 0; 5139 } 5140 5141 static void 5142 wm_put_swsm_semaphore(struct wm_softc *sc) 5143 { 5144 uint32_t swsm; 5145 5146 swsm = CSR_READ(sc, WMREG_SWSM); 5147 swsm &= ~(SWSM_SWESMBI); 5148 CSR_WRITE(sc, WMREG_SWSM, swsm); 5149 } 5150 5151 static int 5152 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 5153 { 5154 uint32_t swfw_sync; 5155 uint32_t swmask = mask << SWFW_SOFT_SHIFT; 5156 uint32_t fwmask = mask << SWFW_FIRM_SHIFT; 5157 int timeout = 200; 5158 5159 for(timeout = 0; timeout < 200; timeout++) { 5160 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 5161 if (wm_get_swsm_semaphore(sc)) { 5162 aprint_error_dev(sc->sc_dev, 5163 "%s: failed to get semaphore\n", 5164 __func__); 5165 return 1; 5166 } 5167 } 5168 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 5169 if ((swfw_sync & (swmask | fwmask)) == 0) { 5170 swfw_sync |= swmask; 5171 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 5172 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5173 wm_put_swsm_semaphore(sc); 5174 return 0; 5175 } 5176 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5177 wm_put_swsm_semaphore(sc); 5178 delay(5000); 5179 } 5180 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n", 5181 device_xname(sc->sc_dev), mask, swfw_sync); 5182 return 1; 5183 } 5184 5185 static void 5186 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 5187 { 5188 uint32_t swfw_sync; 5189 5190 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 5191 while (wm_get_swsm_semaphore(sc) != 0) 5192 continue; 5193 } 5194 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 5195 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); 5196 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 5197 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 5198 
wm_put_swsm_semaphore(sc); 5199 } 5200 5201 static int 5202 wm_get_swfwhw_semaphore(struct wm_softc *sc) 5203 { 5204 uint32_t ext_ctrl; 5205 int timeout = 200; 5206 5207 for(timeout = 0; timeout < 200; timeout++) { 5208 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 5209 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 5210 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 5211 5212 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 5213 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG) 5214 return 0; 5215 delay(5000); 5216 } 5217 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n", 5218 device_xname(sc->sc_dev), ext_ctrl); 5219 return 1; 5220 } 5221 5222 static void 5223 wm_put_swfwhw_semaphore(struct wm_softc *sc) 5224 { 5225 uint32_t ext_ctrl; 5226 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 5227 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 5228 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 5229 } 5230 5231 static int 5232 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) 5233 { 5234 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; 5235 uint8_t bank_high_byte; 5236 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); 5237 5238 if (sc->sc_type != WM_T_ICH10) { 5239 /* Value of bit 22 corresponds to the flash bank we're on. */ 5240 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0; 5241 } else { 5242 wm_read_ich8_byte(sc, act_offset, &bank_high_byte); 5243 if ((bank_high_byte & 0xc0) == 0x80) 5244 *bank = 0; 5245 else { 5246 wm_read_ich8_byte(sc, act_offset + bank1_offset, 5247 &bank_high_byte); 5248 if ((bank_high_byte & 0xc0) == 0x80) 5249 *bank = 1; 5250 else { 5251 aprint_error_dev(sc->sc_dev, 5252 "EEPROM not present\n"); 5253 return -1; 5254 } 5255 } 5256 } 5257 5258 return 0; 5259 } 5260 5261 /****************************************************************************** 5262 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 5263 * register. 
5264 * 5265 * sc - Struct containing variables accessed by shared code 5266 * offset - offset of word in the EEPROM to read 5267 * data - word read from the EEPROM 5268 * words - number of words to read 5269 *****************************************************************************/ 5270 static int 5271 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) 5272 { 5273 int32_t error = 0; 5274 uint32_t flash_bank = 0; 5275 uint32_t act_offset = 0; 5276 uint32_t bank_offset = 0; 5277 uint16_t word = 0; 5278 uint16_t i = 0; 5279 5280 /* We need to know which is the valid flash bank. In the event 5281 * that we didn't allocate eeprom_shadow_ram, we may not be 5282 * managing flash_bank. So it cannot be trusted and needs 5283 * to be updated with each read. 5284 */ 5285 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank); 5286 if (error) { 5287 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n", 5288 __func__); 5289 return error; 5290 } 5291 5292 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ 5293 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 5294 5295 error = wm_get_swfwhw_semaphore(sc); 5296 if (error) { 5297 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 5298 __func__); 5299 return error; 5300 } 5301 5302 for (i = 0; i < words; i++) { 5303 /* The NVM part needs a byte offset, hence * 2 */ 5304 act_offset = bank_offset + ((offset + i) * 2); 5305 error = wm_read_ich8_word(sc, act_offset, &word); 5306 if (error) { 5307 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n", 5308 __func__); 5309 break; 5310 } 5311 data[i] = word; 5312 } 5313 5314 wm_put_swfwhw_semaphore(sc); 5315 return error; 5316 } 5317 5318 /****************************************************************************** 5319 * This function does initial flash setup so that a new read/write/erase cycle 5320 * can be started. 
5321 * 5322 * sc - The pointer to the hw structure 5323 ****************************************************************************/ 5324 static int32_t 5325 wm_ich8_cycle_init(struct wm_softc *sc) 5326 { 5327 uint16_t hsfsts; 5328 int32_t error = 1; 5329 int32_t i = 0; 5330 5331 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 5332 5333 /* May be check the Flash Des Valid bit in Hw status */ 5334 if ((hsfsts & HSFSTS_FLDVAL) == 0) { 5335 return error; 5336 } 5337 5338 /* Clear FCERR in Hw status by writing 1 */ 5339 /* Clear DAEL in Hw status by writing a 1 */ 5340 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL; 5341 5342 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 5343 5344 /* Either we should have a hardware SPI cycle in progress bit to check 5345 * against, in order to start a new cycle or FDONE bit should be changed 5346 * in the hardware so that it is 1 after harware reset, which can then be 5347 * used as an indication whether a cycle is in progress or has been 5348 * completed .. we should also have some software semaphore mechanism to 5349 * guard FDONE or the cycle in progress bit so that two threads access to 5350 * those bits can be sequentiallized or a way so that 2 threads dont 5351 * start the cycle at the same time */ 5352 5353 if ((hsfsts & HSFSTS_FLINPRO) == 0) { 5354 /* There is no cycle running at present, so we can start a cycle */ 5355 /* Begin by setting Flash Cycle Done. */ 5356 hsfsts |= HSFSTS_DONE; 5357 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 5358 error = 0; 5359 } else { 5360 /* otherwise poll for sometime so the current cycle has a chance 5361 * to end before giving up. */ 5362 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) { 5363 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 5364 if ((hsfsts & HSFSTS_FLINPRO) == 0) { 5365 error = 0; 5366 break; 5367 } 5368 delay(1); 5369 } 5370 if (error == 0) { 5371 /* Successful in waiting for previous cycle to timeout, 5372 * now set the Flash Cycle Done. 
*/ 5373 hsfsts |= HSFSTS_DONE; 5374 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 5375 } 5376 } 5377 return error; 5378 } 5379 5380 /****************************************************************************** 5381 * This function starts a flash cycle and waits for its completion 5382 * 5383 * sc - The pointer to the hw structure 5384 ****************************************************************************/ 5385 static int32_t 5386 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout) 5387 { 5388 uint16_t hsflctl; 5389 uint16_t hsfsts; 5390 int32_t error = 1; 5391 uint32_t i = 0; 5392 5393 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 5394 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 5395 hsflctl |= HSFCTL_GO; 5396 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 5397 5398 /* wait till FDONE bit is set to 1 */ 5399 do { 5400 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 5401 if (hsfsts & HSFSTS_DONE) 5402 break; 5403 delay(1); 5404 i++; 5405 } while (i < timeout); 5406 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) { 5407 error = 0; 5408 } 5409 return error; 5410 } 5411 5412 /****************************************************************************** 5413 * Reads a byte or word from the NVM using the ICH8 flash access registers. 5414 * 5415 * sc - The pointer to the hw structure 5416 * index - The index of the byte or word to read. 5417 * size - Size of data to read, 1=byte 2=word 5418 * data - Pointer to the word to store the value read. 
5419 *****************************************************************************/ 5420 static int32_t 5421 wm_read_ich8_data(struct wm_softc *sc, uint32_t index, 5422 uint32_t size, uint16_t* data) 5423 { 5424 uint16_t hsfsts; 5425 uint16_t hsflctl; 5426 uint32_t flash_linear_address; 5427 uint32_t flash_data = 0; 5428 int32_t error = 1; 5429 int32_t count = 0; 5430 5431 if (size < 1 || size > 2 || data == 0x0 || 5432 index > ICH_FLASH_LINEAR_ADDR_MASK) 5433 return error; 5434 5435 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + 5436 sc->sc_ich8_flash_base; 5437 5438 do { 5439 delay(1); 5440 /* Steps */ 5441 error = wm_ich8_cycle_init(sc); 5442 if (error) 5443 break; 5444 5445 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 5446 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 5447 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK; 5448 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT; 5449 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 5450 5451 /* Write the last 24 bits of index into Flash Linear address field in 5452 * Flash Address */ 5453 /* TODO: TBD maybe check the index against the size of flash */ 5454 5455 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address); 5456 5457 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT); 5458 5459 /* Check if FCERR is set to 1, if set to 1, clear it and try the whole 5460 * sequence a few more times, else read in (shift in) the Flash Data0, 5461 * the order is least significant byte first msb to lsb */ 5462 if (error == 0) { 5463 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0); 5464 if (size == 1) { 5465 *data = (uint8_t)(flash_data & 0x000000FF); 5466 } else if (size == 2) { 5467 *data = (uint16_t)(flash_data & 0x0000FFFF); 5468 } 5469 break; 5470 } else { 5471 /* If we've gotten here, then things are probably completely hosed, 5472 * but if the error condition is detected, it won't hurt to give 5473 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT 
 * times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0) {
				/* Cycle never completed: no point retrying. */
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}

/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *
 * Returns 0 on success (thin wrapper around wm_read_ich8_data()).
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
	int32_t status;
	uint16_t word = 0;

	/* Perform a 1-byte read and narrow the 16-bit result. */
	status = wm_read_ich8_data(sc, index, 1, &word);
	if (status == 0) {
		*data = (uint8_t)word;
	}

	return status;
}

/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *
 * Returns 0 on success (thin wrapper around wm_read_ich8_data()).
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}

/*
 * Dispatch to the chip-specific management-mode check.
 * Returns non-zero when manageability firmware is active, 0 otherwise
 * (including for chip types that have no such check).
 */
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
#if 0
	case WM_T_82574:
		/*
		 * The function is provided in em driver, but it's not
		 * used. Why?
		 */
		rv = wm_check_mng_mode_82574(sc);
		break;
#endif
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}

/*
 * ICH8/9/10 variant: management mode is active when the firmware mode
 * field of the FWSM register reads as the ICH IAMT mode.
 * Returns 1 when active, 0 otherwise.
 */
static int
wm_check_mng_mode_ich8lan(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

#if 0
/*
 * 82574 variant (disabled): reads the management-mode bit from the
 * NVM init-control word instead of FWSM.
 */
static int
wm_check_mng_mode_82574(struct wm_softc *sc)
{
	uint16_t data;

	wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);

	if ((data & NVM_INIT_CTRL2_MNGM) != 0)
		return 1;

	return 0;
}
#endif

/*
 * Generic variant: management mode is active when the FWSM firmware
 * mode field reads as the IAMT mode.  Returns 1 when active, 0 otherwise.
 */
static int
wm_check_mng_mode_generic(struct wm_softc *sc)
{
	uint32_t fwsm;

	fwsm = CSR_READ(sc, WMREG_FWSM);

	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
		return 1;

	return 0;
}

/*
 * Tell the firmware the driver has taken over the hardware by setting
 * the driver-load bit: SWSM for 82573, CTRL_EXT for the other types
 * handled here.  Other chip types are left untouched.
 */
static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
#if 0
	case WM_T_82574:
		/*
		 * FreeBSD's em driver has the function for 82574 to checks
		 * the management mode, but it's not used. Why?
		 */
#endif
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* No driver-load handshake for other chip types. */
		break;
	}
}

/* XXX Currently TBI only */
/*
 * Check TBI (fiber) link state and reconcile the MAC with it: force
 * link-up when the signal is present but autonegotiation got nowhere,
 * or re-enable autonegotiation when the link partner is sending /C/
 * ordered sets again.  Always returns 0.
 */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/*
	 * Expected signal-detect value: SWDPIN(1) asserted on chips newer
	 * than 82544, 0 otherwise.
	 */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
	    device_xname(sc->sc_dev), __func__,
	    ((ctrl & CTRL_SWDPIN(1)) == sig),
	    ((status & STATUS_LU) != 0),
	    ((rxcw & RXCW_C) != 0)
	    ));

	/*
	 * SWDPIN LU RXCW
	 * 0 0 0
	 * 0 0 1 (should not happen)
	 * 0 1 0 (should not happen)
	 * 0 1 1 (should not happen)
	 * 1 0 0 Disable autonego and force linkup
	 * 1 0 1 got /C/ but not linkup yet
	 * 1 1 0 (linkup)
	 * 1 1 1 If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present, no link, no /C/: force the link up. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
		    __func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
5678 * 5679 * NOTE: CTRL was updated TFCE and RFCE automatically, 5680 * so we should update sc->sc_ctrl 5681 */ 5682 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD; 5683 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5684 } else if(((status & STATUS_LU) != 0) 5685 && ((rxcw & RXCW_C) != 0) 5686 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) { 5687 sc->sc_tbi_linkup = 1; 5688 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n", 5689 __func__)); 5690 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 5691 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU)); 5692 } else if (((ctrl & CTRL_SWDPIN(1)) == sig) 5693 && ((rxcw & RXCW_C) != 0)) { 5694 DPRINTF(WM_DEBUG_LINK, ("/C/")); 5695 } else { 5696 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl, 5697 status)); 5698 } 5699 5700 return 0; 5701 } 5702