/* $NetBSD: if_wm.c,v 1.150 2007/12/14 00:23:49 tls Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.
  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.150 2007/12/14 00:23:49 tls Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
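/*
 * Both ring sizes below are powers of two, so the "next index" macros
 * can wrap with a cheap AND of (size - 1) instead of a modulo.  A
 * minimal sketch of the arithmetic (illustrative only, never compiled):
 */
#if 0
	int idx = 4095;			/* last slot, WM_NTXDESC == 4096 */
	idx = (idx + 1) & (4096 - 1);	/* wraps to 0, as WM_NEXTTX() does */
#endif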
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
} wm_chip_type;
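/*
 * Note: the driver compares these values with relational operators
 * (e.g. "sc->sc_type >= WM_T_82544" in wm_attach()), so the
 * enumerators must remain in this rough old-to-new order.
 */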
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* software-firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* software-firmware-hardware sync */
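/*
 * For reference (see wm_attach()): ICH8/ICH9 parts use WM_F_SWFWHW_SYNC
 * together with WM_F_EEPROM_FLASH, the i80003 uses WM_F_SWFW_SYNC with
 * WM_F_EEPROM_EERDEEWR, the i82573 uses WM_F_EEPROM_EERDEEWR alone, and
 * everything newer than the i82544 needs WM_F_EEPROM_HANDSHAKE.
 */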
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
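/*
 * A sketch of why the 2-byte "scoot" above matters (illustrative only,
 * never compiled): the Ethernet header is 14 bytes, so with a 4-byte
 * aligned cluster the IP header is misaligned unless the frame starts
 * at offset 2.
 */
#if 0
	/* frame at buf + 0: IP header at buf + 14 -- misaligned */
	/* frame at buf + 2: IP header at buf + 16 -- 4-byte aligned */
#endif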
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
(SERDES)", 691 WM_T_82546_3, WMP_F_SERDES }, 692 #endif 693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 694 "i82546GB quad-port Gigabit Ethernet", 695 WM_T_82546_3, WMP_F_1000T }, 696 697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 698 "i82546GB quad-port Gigabit Ethernet (KSP3)", 699 WM_T_82546_3, WMP_F_1000T }, 700 701 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 702 "Intel PRO/1000MT (82546GB)", 703 WM_T_82546_3, WMP_F_1000T }, 704 705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 706 "Intel i82541EI 1000BASE-T Ethernet", 707 WM_T_82541, WMP_F_1000T }, 708 709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 710 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 711 WM_T_82541, WMP_F_1000T }, 712 713 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 714 "Intel i82541EI Mobile 1000BASE-T Ethernet", 715 WM_T_82541, WMP_F_1000T }, 716 717 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 718 "Intel i82541ER 1000BASE-T Ethernet", 719 WM_T_82541_2, WMP_F_1000T }, 720 721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 722 "Intel i82541GI 1000BASE-T Ethernet", 723 WM_T_82541_2, WMP_F_1000T }, 724 725 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 726 "Intel i82541GI Mobile 1000BASE-T Ethernet", 727 WM_T_82541_2, WMP_F_1000T }, 728 729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 730 "Intel i82541PI 1000BASE-T Ethernet", 731 WM_T_82541_2, WMP_F_1000T }, 732 733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 734 "Intel i82547EI 1000BASE-T Ethernet", 735 WM_T_82547, WMP_F_1000T }, 736 737 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 738 "Intel i82547EI Mobile 1000BASE-T Ethernet", 739 WM_T_82547, WMP_F_1000T }, 740 741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 742 "Intel i82547GI 1000BASE-T Ethernet", 743 WM_T_82547_2, WMP_F_1000T }, 744 745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 746 "Intel PRO/1000 PT (82571EB)", 747 WM_T_82571, WMP_F_1000T }, 748 749 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 750 "Intel PRO/1000 PF (82571EB)", 751 WM_T_82571, WMP_F_1000X }, 752 #if 0 753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 754 "Intel PRO/1000 PB (82571EB)", 755 WM_T_82571, WMP_F_SERDES }, 756 #endif 757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 758 "Intel PRO/1000 QT (82571EB)", 759 WM_T_82571, WMP_F_1000T }, 760 761 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 762 "Intel i82572EI 1000baseT Ethernet", 763 WM_T_82572, WMP_F_1000T }, 764 765 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 766 "Intel i82572EI 1000baseX Ethernet", 767 WM_T_82572, WMP_F_1000X }, 768 #if 0 769 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 770 "Intel i82572EI Gigabit Ethernet (SERDES)", 771 WM_T_82572, WMP_F_SERDES }, 772 #endif 773 774 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 775 "Intel i82572EI 1000baseT Ethernet", 776 WM_T_82572, WMP_F_1000T }, 777 778 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 779 "Intel i82573E", 780 WM_T_82573, WMP_F_1000T }, 781 782 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 783 "Intel i82573E IAMT", 784 WM_T_82573, WMP_F_1000T }, 785 786 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 787 "Intel i82573L Gigabit Ethernet", 788 WM_T_82573, WMP_F_1000T }, 789 790 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 791 "i80003 dual 1000baseT Ethernet", 792 WM_T_80003, WMP_F_1000T }, 793 794 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 795 "i80003 dual 1000baseX Ethernet", 796 WM_T_80003, WMP_F_1000T }, 797 #if 0 798 { PCI_VENDOR_INTEL, 
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}
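/*
 * wm_attach:
 *
 *	Attach a wm interface: map the registers, establish the
 *	interrupt, allocate DMA resources, fetch the MAC address from
 *	device properties or the EEPROM, and attach the network
 *	interface.
 */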
static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
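	/*
	 * The shorter queue (16 jobs instead of 64) on the 82547
	 * presumably keeps fewer packets in flight while the Tx FIFO
	 * stall work-around (wm_82547_txfifo_stall()) drains the FIFO;
	 * see also wm_82547_txfifo_bugchk().
	 */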
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			printf("%s: can't map FLASH registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
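	/*
	 * sc_ee_addrbits is the number of address bits the serial
	 * EEPROM decodes, so the part holds 1U << sc_ee_addrbits
	 * 16-bit words: 6 bits -> 64 words, 8 -> 256, 16 -> 65536.
	 * That is what the "%u word (%d address bits)" message below
	 * reports.
	 */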
	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));
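	/*
	 * The toggle above works because both functions of a dual-port
	 * controller read the same stored address; the second function
	 * (STATUS_FUNCID bit set) derives its own address by flipping
	 * the low bit.
	 */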
	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}
	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_82573) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're a i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif
#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return (0);
}
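/*
 * A note on the TSO setup above: ip_len/ip6_plen are zeroed and th_sum
 * is seeded with only the pseudo-header checksum (addresses and
 * protocol, no length); the idea is that the controller then fills in
 * per-segment lengths and finishes the checksum as it carves the
 * payload into MSS-sized frames.
 */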
1750 */ 1751 struct tcphdr *th; 1752 1753 if (v4) { 1754 struct ip *ip = 1755 (void *)(mtod(m0, char *) + offset); 1756 th = (void *)(mtod(m0, char *) + hlen); 1757 1758 ip->ip_len = 0; 1759 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1760 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1761 } else { 1762 struct ip6_hdr *ip6 = 1763 (void *)(mtod(m0, char *) + offset); 1764 th = (void *)(mtod(m0, char *) + hlen); 1765 1766 ip6->ip6_plen = 0; 1767 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1768 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1769 } 1770 hlen += th->th_off << 2; 1771 } 1772 1773 if (v4) { 1774 WM_EVCNT_INCR(&sc->sc_ev_txtso); 1775 cmdlen |= WTX_TCPIP_CMD_IP; 1776 } else { 1777 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 1778 ipcse = 0; 1779 } 1780 cmd |= WTX_TCPIP_CMD_TSE; 1781 cmdlen |= WTX_TCPIP_CMD_TSE | 1782 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1783 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1784 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1785 } 1786 1787 /* 1788 * NOTE: Even if we're not using the IP or TCP/UDP checksum 1789 * offload feature, if we load the context descriptor, we 1790 * MUST provide valid values for IPCSS and TUCSS fields. 1791 */ 1792 1793 ipcs = WTX_TCPIP_IPCSS(offset) | 1794 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1795 WTX_TCPIP_IPCSE(ipcse); 1796 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1797 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1798 fields |= WTX_IXSM; 1799 } 1800 1801 offset += iphl; 1802 1803 if (m0->m_pkthdr.csum_flags & 1804 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1805 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1806 fields |= WTX_TXSM; 1807 tucs = WTX_TCPIP_TUCSS(offset) | 1808 WTX_TCPIP_TUCSO(offset + 1809 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1810 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1811 } else if ((m0->m_pkthdr.csum_flags & 1812 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 1813 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1814 fields |= WTX_TXSM; 1815 tucs = WTX_TCPIP_TUCSS(offset) | 1816 WTX_TCPIP_TUCSO(offset + 1817 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1818 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1819 } else { 1820 /* Just initialize it to a valid TCP context. */ 1821 tucs = WTX_TCPIP_TUCSS(offset) | 1822 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1823 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1824 } 1825 1826 /* Fill in the context descriptor. */ 1827 t = (struct livengood_tcpip_ctxdesc *) 1828 &sc->sc_txdescs[sc->sc_txnext]; 1829 t->tcpip_ipcs = htole32(ipcs); 1830 t->tcpip_tucs = htole32(tucs); 1831 t->tcpip_cmdlen = htole32(cmdlen); 1832 t->tcpip_seg = htole32(seg); 1833 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 1834 1835 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 1836 txs->txs_ndesc++; 1837 1838 *cmdp = cmd; 1839 *fieldsp = fields; 1840 1841 return (0); 1842 } 1843 1844 static void 1845 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 1846 { 1847 struct mbuf *m; 1848 int i; 1849 1850 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname); 1851 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 1852 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 1853 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname, 1854 m->m_data, m->m_len, m->m_flags); 1855 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname, 1856 i, i == 1 ? "" : "s"); 1857 } 1858 1859 /* 1860 * wm_82547_txfifo_stall: 1861 * 1862 * Callout used to wait for the 82547 Tx FIFO to drain, 1863 * reset the FIFO pointers, and restart packet transmission. 
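 * The FIFO is treated as drained once the descriptor ring head and
 * tail registers and both pairs of internal FIFO head/tail registers
 * read equal; until then, the callout re-arms itself every tick.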
*/ 1865 static void 1866 wm_82547_txfifo_stall(void *arg) 1867 { 1868 struct wm_softc *sc = arg; 1869 int s; 1870 1871 s = splnet(); 1872 1873 if (sc->sc_txfifo_stall) { 1874 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) && 1875 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 1876 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 1877 /* 1878 * Packets have drained. Stop transmitter, reset 1879 * FIFO pointers, restart transmitter, and kick 1880 * the packet queue. 1881 */ 1882 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 1883 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 1884 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr); 1885 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr); 1886 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr); 1887 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr); 1888 CSR_WRITE(sc, WMREG_TCTL, tctl); 1889 CSR_WRITE_FLUSH(sc); 1890 1891 sc->sc_txfifo_head = 0; 1892 sc->sc_txfifo_stall = 0; 1893 wm_start(&sc->sc_ethercom.ec_if); 1894 } else { 1895 /* 1896 * Still waiting for packets to drain; try again in 1897 * another tick. 1898 */ 1899 callout_schedule(&sc->sc_txfifo_ch, 1); 1900 } 1901 } 1902 1903 splx(s); 1904 } 1905 1906 /* 1907 * wm_82547_txfifo_bugchk: 1908 * 1909 * Check for bug condition in the 82547 Tx FIFO. We need to 1910 * prevent enqueueing a packet that would wrap around the end 1911 * of the Tx FIFO ring buffer, otherwise the chip will croak. 1912 * 1913 * We do this by checking the amount of space before the end 1914 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 1915 * the Tx FIFO, wait for all remaining packets to drain, reset 1916 * the internal FIFO pointers to the beginning, and restart 1917 * transmission on the interface. 1918 */ 1919 #define WM_FIFO_HDR 0x10 1920 #define WM_82547_PAD_LEN 0x3e0 1921 static int 1922 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 1923 { 1924 int space = sc->sc_txfifo_size - sc->sc_txfifo_head; 1925 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 1926 1927 /* Just return if already stalled. */ 1928 if (sc->sc_txfifo_stall) 1929 return (1); 1930 1931 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1932 /* Stall only occurs in half-duplex mode. */ 1933 goto send_packet; 1934 } 1935 1936 if (len >= WM_82547_PAD_LEN + space) { 1937 sc->sc_txfifo_stall = 1; 1938 callout_schedule(&sc->sc_txfifo_ch, 1); 1939 return (1); 1940 } 1941 1942 send_packet: 1943 sc->sc_txfifo_head += len; 1944 if (sc->sc_txfifo_head >= sc->sc_txfifo_size) 1945 sc->sc_txfifo_head -= sc->sc_txfifo_size; 1946 1947 return (0); 1948 } 1949 1950 /* 1951 * wm_start: [ifnet interface function] 1952 * 1953 * Start packet transmission on the interface. 1954 */ 1955 static void 1956 wm_start(struct ifnet *ifp) 1957 { 1958 struct wm_softc *sc = ifp->if_softc; 1959 struct mbuf *m0; 1960 #if 0 /* XXXJRT */ 1961 struct m_tag *mtag; 1962 #endif 1963 struct wm_txsoft *txs; 1964 bus_dmamap_t dmamap; 1965 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; 1966 bus_addr_t curaddr; 1967 bus_size_t seglen, curlen; 1968 uint32_t cksumcmd; 1969 uint8_t cksumfields; 1970 1971 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1972 return; 1973 1974 /* 1975 * Remember the previous number of free descriptors. 1976 */ 1977 ofree = sc->sc_txfree; 1978 1979 /* 1980 * Loop through the send queue, setting up transmit descriptors 1981 * until we drain the queue, or use up all available transmit 1982 * descriptors. 1983 */ 1984 for (;;) { 1985 /* Grab a packet off the queue.
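 * (IFQ_POLL() leaves the packet on the queue; it is dequeued with
 * IFQ_DEQUEUE() only once we know we can commit to transmitting it.)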
*/ 1986 IFQ_POLL(&ifp->if_snd, m0); 1987 if (m0 == NULL) 1988 break; 1989 1990 DPRINTF(WM_DEBUG_TX, 1991 ("%s: TX: have packet to transmit: %p\n", 1992 sc->sc_dev.dv_xname, m0)); 1993 1994 /* Get a work queue entry. */ 1995 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) { 1996 wm_txintr(sc); 1997 if (sc->sc_txsfree == 0) { 1998 DPRINTF(WM_DEBUG_TX, 1999 ("%s: TX: no free job descriptors\n", 2000 sc->sc_dev.dv_xname)); 2001 WM_EVCNT_INCR(&sc->sc_ev_txsstall); 2002 break; 2003 } 2004 } 2005 2006 txs = &sc->sc_txsoft[sc->sc_txsnext]; 2007 dmamap = txs->txs_dmamap; 2008 2009 use_tso = (m0->m_pkthdr.csum_flags & 2010 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0; 2011 2012 /* 2013 * So says the Linux driver: 2014 * The controller does a simple calculation to make sure 2015 * there is enough room in the FIFO before initiating the 2016 * DMA for each buffer. The calc is: 2017 * 4 = ceil(buffer len / MSS) 2018 * To make sure we don't overrun the FIFO, adjust the max 2019 * buffer len if the MSS drops. 2020 */ 2021 dmamap->dm_maxsegsz = 2022 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN) 2023 ? m0->m_pkthdr.segsz << 2 2024 : WTX_MAX_LEN; 2025 2026 /* 2027 * Load the DMA map. If this fails, the packet either 2028 * didn't fit in the allotted number of segments, or we 2029 * were short on resources. For the too-many-segments 2030 * case, we simply report an error and drop the packet, 2031 * since we can't sanely copy a jumbo packet to a single 2032 * buffer. 2033 */ 2034 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 2035 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 2036 if (error) { 2037 if (error == EFBIG) { 2038 WM_EVCNT_INCR(&sc->sc_ev_txdrop); 2039 log(LOG_ERR, "%s: Tx packet consumes too many " 2040 "DMA segments, dropping...\n", 2041 sc->sc_dev.dv_xname); 2042 IFQ_DEQUEUE(&ifp->if_snd, m0); 2043 wm_dump_mbuf_chain(sc, m0); 2044 m_freem(m0); 2045 continue; 2046 } 2047 /* 2048 * Short on resources, just stop for now. 2049 */ 2050 DPRINTF(WM_DEBUG_TX, 2051 ("%s: TX: dmamap load failed: %d\n", 2052 sc->sc_dev.dv_xname, error)); 2053 break; 2054 } 2055 2056 segs_needed = dmamap->dm_nsegs; 2057 if (use_tso) { 2058 /* For sentinel descriptor; see below. */ 2059 segs_needed++; 2060 } 2061 2062 /* 2063 * Ensure we have enough descriptors free to describe 2064 * the packet. Note, we always reserve one descriptor 2065 * at the end of the ring due to the semantics of the 2066 * TDT register, plus one more in the event we need 2067 * to load offload context. 2068 */ 2069 if (segs_needed > sc->sc_txfree - 2) { 2070 /* 2071 * Not enough free descriptors to transmit this 2072 * packet. We haven't committed anything yet, 2073 * so just unload the DMA map, put the packet 2074 * back on the queue, and punt. Notify the upper 2075 * layer that there are no more slots left. 2076 */ 2077 DPRINTF(WM_DEBUG_TX, 2078 ("%s: TX: need %d (%d) descriptors, have %d\n", 2079 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed, 2080 sc->sc_txfree - 1)); 2081 ifp->if_flags |= IFF_OACTIVE; 2082 bus_dmamap_unload(sc->sc_dmat, dmamap); 2083 WM_EVCNT_INCR(&sc->sc_ev_txdstall); 2084 break; 2085 } 2086 2087 /* 2088 * Check for 82547 Tx FIFO bug. We need to do this 2089 * once we know we can transmit the packet, since we 2090 * do some internal FIFO space accounting here.
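 * (On success, wm_82547_txfifo_bugchk() advances sc_txfifo_head by
 * the padded packet length, so it must not be called for a packet
 * we might still drop or defer.)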
*/ 2092 if (sc->sc_type == WM_T_82547 && 2093 wm_82547_txfifo_bugchk(sc, m0)) { 2094 DPRINTF(WM_DEBUG_TX, 2095 ("%s: TX: 82547 Tx FIFO bug detected\n", 2096 sc->sc_dev.dv_xname)); 2097 ifp->if_flags |= IFF_OACTIVE; 2098 bus_dmamap_unload(sc->sc_dmat, dmamap); 2099 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall); 2100 break; 2101 } 2102 2103 IFQ_DEQUEUE(&ifp->if_snd, m0); 2104 2105 /* 2106 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 2107 */ 2108 2109 DPRINTF(WM_DEBUG_TX, 2110 ("%s: TX: packet has %d (%d) DMA segments\n", 2111 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed)); 2112 2113 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 2114 2115 /* 2116 * Store a pointer to the packet so that we can free it 2117 * later. 2118 * 2119 * Initially, we consider the number of descriptors the 2120 * packet uses to be the number of DMA segments. This may be 2121 * incremented by 1 if we do checksum offload (a descriptor 2122 * is used to set the checksum context). 2123 */ 2124 txs->txs_mbuf = m0; 2125 txs->txs_firstdesc = sc->sc_txnext; 2126 txs->txs_ndesc = segs_needed; 2127 2128 /* Set up offload parameters for this packet. */ 2129 if (m0->m_pkthdr.csum_flags & 2130 (M_CSUM_TSOv4|M_CSUM_TSOv6| 2131 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 2132 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 2133 if (wm_tx_offload(sc, txs, &cksumcmd, 2134 &cksumfields) != 0) { 2135 /* Error message already displayed. */ 2136 bus_dmamap_unload(sc->sc_dmat, dmamap); 2137 continue; 2138 } 2139 } else { 2140 cksumcmd = 0; 2141 cksumfields = 0; 2142 } 2143 2144 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 2145 2146 /* Sync the DMA map. */ 2147 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 2148 BUS_DMASYNC_PREWRITE); 2149 2150 /* 2151 * Initialize the transmit descriptor. 2152 */ 2153 for (nexttx = sc->sc_txnext, seg = 0; 2154 seg < dmamap->dm_nsegs; seg++) { 2155 for (seglen = dmamap->dm_segs[seg].ds_len, 2156 curaddr = dmamap->dm_segs[seg].ds_addr; 2157 seglen != 0; 2158 curaddr += curlen, seglen -= curlen, 2159 nexttx = WM_NEXTTX(sc, nexttx)) { 2160 curlen = seglen; 2161 2162 /* 2163 * So says the Linux driver: 2164 * Work around for premature descriptor 2165 * write-backs in TSO mode. Append a 2166 * 4-byte sentinel descriptor. 2167 */ 2168 if (use_tso && 2169 seg == dmamap->dm_nsegs - 1 && 2170 curlen > 8) 2171 curlen -= 4; 2172 2173 wm_set_dma_addr( 2174 &sc->sc_txdescs[nexttx].wtx_addr, 2175 curaddr); 2176 sc->sc_txdescs[nexttx].wtx_cmdlen = 2177 htole32(cksumcmd | curlen); 2178 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2179 0; 2180 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2181 cksumfields; 2182 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2183 lasttx = nexttx; 2184 2185 DPRINTF(WM_DEBUG_TX, 2186 ("%s: TX: desc %d: low 0x%08lx, " 2187 "len 0x%04x\n", 2188 sc->sc_dev.dv_xname, nexttx, 2189 curaddr & 0xffffffffUL, (unsigned)curlen)); 2190 } 2191 } 2192 2193 KASSERT(lasttx != -1); 2194 2195 /* 2196 * Set up the command byte on the last descriptor of 2197 * the packet. If we're in the interrupt delay window, 2198 * delay the interrupt. 2199 */ 2200 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2201 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2202 2203 #if 0 /* XXXJRT */ 2204 /* 2205 * If VLANs are enabled and the packet has a VLAN tag, set 2206 * up the descriptor to encapsulate the packet for us. 2207 * 2208 * This is only valid on the last descriptor of the packet.
2209 */ 2210 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2211 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2212 htole32(WTX_CMD_VLE); 2213 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2214 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2215 } 2216 #endif /* XXXJRT */ 2217 2218 txs->txs_lastdesc = lasttx; 2219 2220 DPRINTF(WM_DEBUG_TX, 2221 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname, 2222 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2223 2224 /* Sync the descriptors we're using. */ 2225 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2226 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2227 2228 /* Give the packet to the chip. */ 2229 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2230 2231 DPRINTF(WM_DEBUG_TX, 2232 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx)); 2233 2234 DPRINTF(WM_DEBUG_TX, 2235 ("%s: TX: finished transmitting packet, job %d\n", 2236 sc->sc_dev.dv_xname, sc->sc_txsnext)); 2237 2238 /* Advance the tx pointer. */ 2239 sc->sc_txfree -= txs->txs_ndesc; 2240 sc->sc_txnext = nexttx; 2241 2242 sc->sc_txsfree--; 2243 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2244 2245 #if NBPFILTER > 0 2246 /* Pass the packet to any BPF listeners. */ 2247 if (ifp->if_bpf) 2248 bpf_mtap(ifp->if_bpf, m0); 2249 #endif /* NBPFILTER > 0 */ 2250 } 2251 2252 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2253 /* No more slots; notify upper layer. */ 2254 ifp->if_flags |= IFF_OACTIVE; 2255 } 2256 2257 if (sc->sc_txfree != ofree) { 2258 /* Set a watchdog timer in case the chip flakes out. */ 2259 ifp->if_timer = 5; 2260 } 2261 } 2262 2263 /* 2264 * wm_watchdog: [ifnet interface function] 2265 * 2266 * Watchdog timer handler. 2267 */ 2268 static void 2269 wm_watchdog(struct ifnet *ifp) 2270 { 2271 struct wm_softc *sc = ifp->if_softc; 2272 2273 /* 2274 * Since we're using delayed interrupts, sweep up 2275 * before we report an error. 2276 */ 2277 wm_txintr(sc); 2278 2279 if (sc->sc_txfree != WM_NTXDESC(sc)) { 2280 log(LOG_ERR, 2281 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2282 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 2283 sc->sc_txnext); 2284 ifp->if_oerrors++; 2285 2286 /* Reset the interface. */ 2287 (void) wm_init(ifp); 2288 } 2289 2290 /* Try to get more packets going. */ 2291 wm_start(ifp); 2292 } 2293 2294 /* 2295 * wm_ioctl: [ifnet interface function] 2296 * 2297 * Handle control requests from the operator. 2298 */ 2299 static int 2300 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 2301 { 2302 struct wm_softc *sc = ifp->if_softc; 2303 struct ifreq *ifr = (struct ifreq *) data; 2304 int s, error; 2305 2306 s = splnet(); 2307 2308 switch (cmd) { 2309 case SIOCSIFMEDIA: 2310 case SIOCGIFMEDIA: 2311 /* Flow control requires full-duplex mode. */ 2312 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 2313 (ifr->ifr_media & IFM_FDX) == 0) 2314 ifr->ifr_media &= ~IFM_ETH_FMASK; 2315 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 2316 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 2317 /* We can do both TXPAUSE and RXPAUSE. */ 2318 ifr->ifr_media |= 2319 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 2320 } 2321 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 2322 } 2323 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 2324 break; 2325 default: 2326 error = ether_ioctl(ifp, cmd, data); 2327 if (error == ENETRESET) { 2328 /* 2329 * Multicast list has changed; set the hardware filter 2330 * accordingly. 
2331 */ 2332 if (ifp->if_flags & IFF_RUNNING) 2333 wm_set_filter(sc); 2334 error = 0; 2335 } 2336 break; 2337 } 2338 2339 /* Try to get more packets going. */ 2340 wm_start(ifp); 2341 2342 splx(s); 2343 return (error); 2344 } 2345 2346 /* 2347 * wm_intr: 2348 * 2349 * Interrupt service routine. 2350 */ 2351 static int 2352 wm_intr(void *arg) 2353 { 2354 struct wm_softc *sc = arg; 2355 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2356 uint32_t icr; 2357 int handled = 0; 2358 2359 while (1 /* CONSTCOND */) { 2360 icr = CSR_READ(sc, WMREG_ICR); 2361 if ((icr & sc->sc_icr) == 0) 2362 break; 2363 #if 0 /*NRND > 0*/ 2364 if (RND_ENABLED(&sc->rnd_source)) 2365 rnd_add_uint32(&sc->rnd_source, icr); 2366 #endif 2367 2368 handled = 1; 2369 2370 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2371 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 2372 DPRINTF(WM_DEBUG_RX, 2373 ("%s: RX: got Rx intr 0x%08x\n", 2374 sc->sc_dev.dv_xname, 2375 icr & (ICR_RXDMT0|ICR_RXT0))); 2376 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 2377 } 2378 #endif 2379 wm_rxintr(sc); 2380 2381 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2382 if (icr & ICR_TXDW) { 2383 DPRINTF(WM_DEBUG_TX, 2384 ("%s: TX: got TXDW interrupt\n", 2385 sc->sc_dev.dv_xname)); 2386 WM_EVCNT_INCR(&sc->sc_ev_txdw); 2387 } 2388 #endif 2389 wm_txintr(sc); 2390 2391 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 2392 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 2393 wm_linkintr(sc, icr); 2394 } 2395 2396 if (icr & ICR_RXO) { 2397 ifp->if_ierrors++; 2398 #if defined(WM_DEBUG) 2399 log(LOG_WARNING, "%s: Receive overrun\n", 2400 sc->sc_dev.dv_xname); 2401 #endif /* defined(WM_DEBUG) */ 2402 } 2403 } 2404 2405 if (handled) { 2406 /* Try to get more packets going. */ 2407 wm_start(ifp); 2408 } 2409 2410 return (handled); 2411 } 2412 2413 /* 2414 * wm_txintr: 2415 * 2416 * Helper; handle transmit interrupts. 2417 */ 2418 static void 2419 wm_txintr(struct wm_softc *sc) 2420 { 2421 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2422 struct wm_txsoft *txs; 2423 uint8_t status; 2424 int i; 2425 2426 ifp->if_flags &= ~IFF_OACTIVE; 2427 2428 /* 2429 * Go through the Tx list and free mbufs for those 2430 * frames which have been transmitted. 2431 */ 2432 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2433 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2434 txs = &sc->sc_txsoft[i]; 2435 2436 DPRINTF(WM_DEBUG_TX, 2437 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i)); 2438 2439 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2440 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2441 2442 status = 2443 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2444 if ((status & WTX_ST_DD) == 0) { 2445 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2446 BUS_DMASYNC_PREREAD); 2447 break; 2448 } 2449 2450 DPRINTF(WM_DEBUG_TX, 2451 ("%s: TX: job %d done: descs %d..%d\n", 2452 sc->sc_dev.dv_xname, i, txs->txs_firstdesc, 2453 txs->txs_lastdesc)); 2454 2455 /* 2456 * XXX We should probably be using the statistics 2457 * XXX registers, but I don't know if they exist 2458 * XXX on chips before the i82544. 
2459 */ 2460 2461 #ifdef WM_EVENT_COUNTERS 2462 if (status & WTX_ST_TU) 2463 WM_EVCNT_INCR(&sc->sc_ev_tu); 2464 #endif /* WM_EVENT_COUNTERS */ 2465 2466 if (status & (WTX_ST_EC|WTX_ST_LC)) { 2467 ifp->if_oerrors++; 2468 if (status & WTX_ST_LC) 2469 log(LOG_WARNING, "%s: late collision\n", 2470 sc->sc_dev.dv_xname); 2471 else if (status & WTX_ST_EC) { 2472 ifp->if_collisions += 16; 2473 log(LOG_WARNING, "%s: excessive collisions\n", 2474 sc->sc_dev.dv_xname); 2475 } 2476 } else 2477 ifp->if_opackets++; 2478 2479 sc->sc_txfree += txs->txs_ndesc; 2480 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 2481 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2482 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2483 m_freem(txs->txs_mbuf); 2484 txs->txs_mbuf = NULL; 2485 } 2486 2487 /* Update the dirty transmit buffer pointer. */ 2488 sc->sc_txsdirty = i; 2489 DPRINTF(WM_DEBUG_TX, 2490 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i)); 2491 2492 /* 2493 * If there are no more pending transmissions, cancel the watchdog 2494 * timer. 2495 */ 2496 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 2497 ifp->if_timer = 0; 2498 } 2499 2500 /* 2501 * wm_rxintr: 2502 * 2503 * Helper; handle receive interrupts. 2504 */ 2505 static void 2506 wm_rxintr(struct wm_softc *sc) 2507 { 2508 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2509 struct wm_rxsoft *rxs; 2510 struct mbuf *m; 2511 int i, len; 2512 uint8_t status, errors; 2513 2514 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 2515 rxs = &sc->sc_rxsoft[i]; 2516 2517 DPRINTF(WM_DEBUG_RX, 2518 ("%s: RX: checking descriptor %d\n", 2519 sc->sc_dev.dv_xname, i)); 2520 2521 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2522 2523 status = sc->sc_rxdescs[i].wrx_status; 2524 errors = sc->sc_rxdescs[i].wrx_errors; 2525 len = le16toh(sc->sc_rxdescs[i].wrx_len); 2526 2527 if ((status & WRX_ST_DD) == 0) { 2528 /* 2529 * We have processed all of the receive descriptors. 2530 */ 2531 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 2532 break; 2533 } 2534 2535 if (__predict_false(sc->sc_rxdiscard)) { 2536 DPRINTF(WM_DEBUG_RX, 2537 ("%s: RX: discarding contents of descriptor %d\n", 2538 sc->sc_dev.dv_xname, i)); 2539 WM_INIT_RXDESC(sc, i); 2540 if (status & WRX_ST_EOP) { 2541 /* Reset our state. */ 2542 DPRINTF(WM_DEBUG_RX, 2543 ("%s: RX: resetting rxdiscard -> 0\n", 2544 sc->sc_dev.dv_xname)); 2545 sc->sc_rxdiscard = 0; 2546 } 2547 continue; 2548 } 2549 2550 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2551 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2552 2553 m = rxs->rxs_mbuf; 2554 2555 /* 2556 * Add a new receive buffer to the ring, unless of 2557 * course the length is zero. Treat the latter as a 2558 * failed mapping. 2559 */ 2560 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) { 2561 /* 2562 * Failed, throw away what we've done so 2563 * far, and discard the rest of the packet. 2564 */ 2565 ifp->if_ierrors++; 2566 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2567 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2568 WM_INIT_RXDESC(sc, i); 2569 if ((status & WRX_ST_EOP) == 0) 2570 sc->sc_rxdiscard = 1; 2571 if (sc->sc_rxhead != NULL) 2572 m_freem(sc->sc_rxhead); 2573 WM_RXCHAIN_RESET(sc); 2574 DPRINTF(WM_DEBUG_RX, 2575 ("%s: RX: Rx buffer allocation failed, " 2576 "dropping packet%s\n", sc->sc_dev.dv_xname, 2577 sc->sc_rxdiscard ? 
" (discard)" : "")); 2578 continue; 2579 } 2580 2581 WM_RXCHAIN_LINK(sc, m); 2582 2583 m->m_len = len; 2584 2585 DPRINTF(WM_DEBUG_RX, 2586 ("%s: RX: buffer at %p len %d\n", 2587 sc->sc_dev.dv_xname, m->m_data, len)); 2588 2589 /* 2590 * If this is not the end of the packet, keep 2591 * looking. 2592 */ 2593 if ((status & WRX_ST_EOP) == 0) { 2594 sc->sc_rxlen += len; 2595 DPRINTF(WM_DEBUG_RX, 2596 ("%s: RX: not yet EOP, rxlen -> %d\n", 2597 sc->sc_dev.dv_xname, sc->sc_rxlen)); 2598 continue; 2599 } 2600 2601 /* 2602 * Okay, we have the entire packet now. The chip is 2603 * configured to include the FCS (not all chips can 2604 * be configured to strip it), so we need to trim it. 2605 */ 2606 m->m_len -= ETHER_CRC_LEN; 2607 2608 *sc->sc_rxtailp = NULL; 2609 len = m->m_len + sc->sc_rxlen; 2610 m = sc->sc_rxhead; 2611 2612 WM_RXCHAIN_RESET(sc); 2613 2614 DPRINTF(WM_DEBUG_RX, 2615 ("%s: RX: have entire packet, len -> %d\n", 2616 sc->sc_dev.dv_xname, len)); 2617 2618 /* 2619 * If an error occurred, update stats and drop the packet. 2620 */ 2621 if (errors & 2622 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 2623 ifp->if_ierrors++; 2624 if (errors & WRX_ER_SE) 2625 log(LOG_WARNING, "%s: symbol error\n", 2626 sc->sc_dev.dv_xname); 2627 else if (errors & WRX_ER_SEQ) 2628 log(LOG_WARNING, "%s: receive sequence error\n", 2629 sc->sc_dev.dv_xname); 2630 else if (errors & WRX_ER_CE) 2631 log(LOG_WARNING, "%s: CRC error\n", 2632 sc->sc_dev.dv_xname); 2633 m_freem(m); 2634 continue; 2635 } 2636 2637 /* 2638 * No errors. Receive the packet. 2639 */ 2640 m->m_pkthdr.rcvif = ifp; 2641 m->m_pkthdr.len = len; 2642 2643 #if 0 /* XXXJRT */ 2644 /* 2645 * If VLANs are enabled, VLAN packets have been unwrapped 2646 * for us. Associate the tag with the packet. 2647 */ 2648 if ((status & WRX_ST_VP) != 0) { 2649 VLAN_INPUT_TAG(ifp, m, 2650 le16toh(sc->sc_rxdescs[i].wrx_special, 2651 continue); 2652 } 2653 #endif /* XXXJRT */ 2654 2655 /* 2656 * Set up checksum info for this packet. 2657 */ 2658 if ((status & WRX_ST_IXSM) == 0) { 2659 if (status & WRX_ST_IPCS) { 2660 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 2661 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2662 if (errors & WRX_ER_IPE) 2663 m->m_pkthdr.csum_flags |= 2664 M_CSUM_IPv4_BAD; 2665 } 2666 if (status & WRX_ST_TCPCS) { 2667 /* 2668 * Note: we don't know if this was TCP or UDP, 2669 * so we just set both bits, and expect the 2670 * upper layers to deal. 2671 */ 2672 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 2673 m->m_pkthdr.csum_flags |= 2674 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 2675 M_CSUM_TCPv6 | M_CSUM_UDPv6; 2676 if (errors & WRX_ER_TCPE) 2677 m->m_pkthdr.csum_flags |= 2678 M_CSUM_TCP_UDP_BAD; 2679 } 2680 } 2681 2682 ifp->if_ipackets++; 2683 2684 #if NBPFILTER > 0 2685 /* Pass this up to any BPF listeners. */ 2686 if (ifp->if_bpf) 2687 bpf_mtap(ifp->if_bpf, m); 2688 #endif /* NBPFILTER > 0 */ 2689 2690 /* Pass it on. */ 2691 (*ifp->if_input)(ifp, m); 2692 } 2693 2694 /* Update the receive pointer. */ 2695 sc->sc_rxptr = i; 2696 2697 DPRINTF(WM_DEBUG_RX, 2698 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i)); 2699 } 2700 2701 /* 2702 * wm_linkintr: 2703 * 2704 * Helper; handle link interrupts. 2705 */ 2706 static void 2707 wm_linkintr(struct wm_softc *sc, uint32_t icr) 2708 { 2709 uint32_t status; 2710 2711 /* 2712 * If we get a link status interrupt on a 1000BASE-T 2713 * device, just fall into the normal MII tick path. 
2714 */ 2715 if (sc->sc_flags & WM_F_HAS_MII) { 2716 if (icr & ICR_LSC) { 2717 DPRINTF(WM_DEBUG_LINK, 2718 ("%s: LINK: LSC -> mii_tick\n", 2719 sc->sc_dev.dv_xname)); 2720 mii_tick(&sc->sc_mii); 2721 } else if (icr & ICR_RXSEQ) { 2722 DPRINTF(WM_DEBUG_LINK, 2723 ("%s: LINK Receive sequence error\n", 2724 sc->sc_dev.dv_xname)); 2725 } 2726 return; 2727 } 2728 2729 /* 2730 * If we are now receiving /C/, check for link again in 2731 * a couple of link clock ticks. 2732 */ 2733 if (icr & ICR_RXCFG) { 2734 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 2735 sc->sc_dev.dv_xname)); 2736 sc->sc_tbi_anstate = 2; 2737 } 2738 2739 if (icr & ICR_LSC) { 2740 status = CSR_READ(sc, WMREG_STATUS); 2741 if (status & STATUS_LU) { 2742 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 2743 sc->sc_dev.dv_xname, 2744 (status & STATUS_FD) ? "FDX" : "HDX")); 2745 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 2746 sc->sc_fcrtl &= ~FCRTL_XONE; 2747 if (status & STATUS_FD) 2748 sc->sc_tctl |= 2749 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2750 else 2751 sc->sc_tctl |= 2752 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 2753 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 2754 sc->sc_fcrtl |= FCRTL_XONE; 2755 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2756 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 2757 WMREG_OLD_FCRTL : WMREG_FCRTL, 2758 sc->sc_fcrtl); 2759 sc->sc_tbi_linkup = 1; 2760 } else { 2761 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 2762 sc->sc_dev.dv_xname)); 2763 sc->sc_tbi_linkup = 0; 2764 } 2765 sc->sc_tbi_anstate = 2; 2766 wm_tbi_set_linkled(sc); 2767 } else if (icr & ICR_RXSEQ) { 2768 DPRINTF(WM_DEBUG_LINK, 2769 ("%s: LINK: Receive sequence error\n", 2770 sc->sc_dev.dv_xname)); 2771 } 2772 } 2773 2774 /* 2775 * wm_tick: 2776 * 2777 * One second timer, used to check link status, sweep up 2778 * completed transmit jobs, etc. 2779 */ 2780 static void 2781 wm_tick(void *arg) 2782 { 2783 struct wm_softc *sc = arg; 2784 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2785 int s; 2786 2787 s = splnet(); 2788 2789 if (sc->sc_type >= WM_T_82542_2_1) { 2790 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2791 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2792 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2793 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2794 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2795 } 2796 2797 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 2798 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 2799 2800 2801 if (sc->sc_flags & WM_F_HAS_MII) 2802 mii_tick(&sc->sc_mii); 2803 else 2804 wm_tbi_check_link(sc); 2805 2806 splx(s); 2807 2808 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2809 } 2810 2811 /* 2812 * wm_reset: 2813 * 2814 * Reset the i82542 chip. 2815 */ 2816 static void 2817 wm_reset(struct wm_softc *sc) 2818 { 2819 uint32_t reg; 2820 2821 /* 2822 * Allocate on-chip memory according to the MTU size. 2823 * The Packet Buffer Allocation register must be written 2824 * before the chip is reset. 2825 */ 2826 switch (sc->sc_type) { 2827 case WM_T_82547: 2828 case WM_T_82547_2: 2829 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 
PBA_22K : PBA_30K; 2831 sc->sc_txfifo_head = 0; 2832 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 2833 sc->sc_txfifo_size = 2834 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 2835 sc->sc_txfifo_stall = 0; 2836 break; 2837 case WM_T_82571: 2838 case WM_T_82572: 2839 case WM_T_80003: 2840 sc->sc_pba = PBA_32K; 2841 break; 2842 case WM_T_82573: 2843 sc->sc_pba = PBA_12K; 2844 break; 2845 case WM_T_ICH8: 2846 sc->sc_pba = PBA_8K; 2847 CSR_WRITE(sc, WMREG_PBS, PBA_16K); 2848 break; 2849 case WM_T_ICH9: 2850 sc->sc_pba = PBA_10K; 2851 break; 2852 default: 2853 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 2854 PBA_40K : PBA_48K; 2855 break; 2856 } 2857 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 2858 2859 if (sc->sc_flags & WM_F_PCIE) { 2860 int timeout = 800; 2861 2862 sc->sc_ctrl |= CTRL_GIO_M_DIS; 2863 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2864 2865 while (timeout--) { 2866 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0) 2867 break; 2868 delay(100); 2869 } 2870 } 2871 2872 /* clear interrupt */ 2873 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 2874 2875 /* 2876 * 82541 Errata 29? & 82547 Errata 28? 2877 * See also the description about PHY_RST bit in CTRL register 2878 * in 8254x_GBe_SDM.pdf. 2879 */ 2880 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { 2881 CSR_WRITE(sc, WMREG_CTRL, 2882 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); 2883 delay(5000); 2884 } 2885 2886 switch (sc->sc_type) { 2887 case WM_T_82544: 2888 case WM_T_82540: 2889 case WM_T_82545: 2890 case WM_T_82546: 2891 case WM_T_82541: 2892 case WM_T_82541_2: 2893 /* 2894 * On some chipsets, a reset through a memory-mapped write 2895 * cycle can cause the chip to reset before completing the 2896 * write cycle. This causes major headache that can be 2897 * avoided by issuing the reset via indirect register writes 2898 * through I/O space. 2899 * 2900 * So, if we successfully mapped the I/O BAR at attach time, 2901 * use that. Otherwise, try our luck with a memory-mapped 2902 * reset. 2903 */ 2904 if (sc->sc_flags & WM_F_IOH_VALID) 2905 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 2906 else 2907 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 2908 break; 2909 2910 case WM_T_82545_3: 2911 case WM_T_82546_3: 2912 /* Use the shadow control register on these chips. */ 2913 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 2914 break; 2915 2916 case WM_T_ICH8: 2917 case WM_T_ICH9: 2918 wm_get_swfwhw_semaphore(sc); 2919 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET); 2920 delay(10000); 2921 break; 2922 default: 2923 /* Everything else can safely use the documented method.
*/ 2924 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 2925 break; 2926 } 2927 delay(10000); 2928 2929 /* reload EEPROM */ 2930 switch(sc->sc_type) { 2931 case WM_T_82542_2_0: 2932 case WM_T_82542_2_1: 2933 case WM_T_82543: 2934 case WM_T_82544: 2935 delay(10); 2936 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 2937 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2938 delay(2000); 2939 break; 2940 case WM_T_82541: 2941 case WM_T_82541_2: 2942 case WM_T_82547: 2943 case WM_T_82547_2: 2944 delay(20000); 2945 break; 2946 case WM_T_82573: 2947 if (sc->sc_flags & WM_F_EEPROM_FLASH) { 2948 delay(10); 2949 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 2950 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2951 } 2952 /* FALLTHROUGH */ 2953 default: 2954 /* check EECD_EE_AUTORD */ 2955 wm_get_auto_rd_done(sc); 2956 } 2957 2958 #if 0 2959 for (i = 0; i < 1000; i++) { 2960 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) { 2961 return; 2962 } 2963 delay(20); 2964 } 2965 2966 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST) 2967 log(LOG_ERR, "%s: reset failed to complete\n", 2968 sc->sc_dev.dv_xname); 2969 #endif 2970 } 2971 2972 /* 2973 * wm_init: [ifnet interface function] 2974 * 2975 * Initialize the interface. Must be called at splnet(). 2976 */ 2977 static int 2978 wm_init(struct ifnet *ifp) 2979 { 2980 struct wm_softc *sc = ifp->if_softc; 2981 struct wm_rxsoft *rxs; 2982 int i, error = 0; 2983 uint32_t reg; 2984 2985 /* 2986 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set. 2987 * There is a small but measurable benefit to avoiding the adjustment 2988 * of the descriptor so that the headers are aligned, for normal mtu, 2989 * on such platforms. One possibility is that the DMA itself is 2990 * slightly more efficient if the front of the entire packet (instead 2991 * of the front of the headers) is aligned. 2992 * 2993 * Note we must always set align_tweak to 0 if we are using 2994 * jumbo frames. 2995 */ 2996 #ifdef __NO_STRICT_ALIGNMENT 2997 sc->sc_align_tweak = 0; 2998 #else 2999 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 3000 sc->sc_align_tweak = 0; 3001 else 3002 sc->sc_align_tweak = 2; 3003 #endif /* __NO_STRICT_ALIGNMENT */ 3004 3005 /* Cancel any pending I/O. */ 3006 wm_stop(ifp, 0); 3007 3008 /* update statistics before reset */ 3009 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 3010 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 3011 3012 /* Reset the chip to a known state. */ 3013 wm_reset(sc); 3014 3015 /* Initialize the transmit descriptor ring.
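 * (Zero every descriptor and sync the whole ring to the chip before
 * handing the ring's base address and length to the hardware below.)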
*/ 3016 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 3017 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 3018 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3019 sc->sc_txfree = WM_NTXDESC(sc); 3020 sc->sc_txnext = 0; 3021 3022 if (sc->sc_type < WM_T_82543) { 3023 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0)); 3024 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0)); 3025 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 3026 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 3027 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 3028 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 3029 } else { 3030 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0)); 3031 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0)); 3032 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 3033 CSR_WRITE(sc, WMREG_TDH, 0); 3034 CSR_WRITE(sc, WMREG_TDT, 0); 3035 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ 3036 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ 3037 3038 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 3039 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 3040 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 3041 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 3042 } 3043 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 3044 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 3045 3046 /* Initialize the transmit job descriptors. */ 3047 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 3048 sc->sc_txsoft[i].txs_mbuf = NULL; 3049 sc->sc_txsfree = WM_TXQUEUELEN(sc); 3050 sc->sc_txsnext = 0; 3051 sc->sc_txsdirty = 0; 3052 3053 /* 3054 * Initialize the receive descriptor and receive job 3055 * descriptor rings. 3056 */ 3057 if (sc->sc_type < WM_T_82543) { 3058 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 3059 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 3060 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 3061 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 3062 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 3063 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 3064 3065 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 3066 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 3067 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 3068 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 3069 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 3070 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 3071 } else { 3072 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 3073 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 3074 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 3075 CSR_WRITE(sc, WMREG_RDH, 0); 3076 CSR_WRITE(sc, WMREG_RDT, 0); 3077 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ 3078 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ 3079 } 3080 for (i = 0; i < WM_NRXDESC; i++) { 3081 rxs = &sc->sc_rxsoft[i]; 3082 if (rxs->rxs_mbuf == NULL) { 3083 if ((error = wm_add_rxbuf(sc, i)) != 0) { 3084 log(LOG_ERR, "%s: unable to allocate or map rx " 3085 "buffer %d, error = %d\n", 3086 sc->sc_dev.dv_xname, i, error); 3087 /* 3088 * XXX Should attempt to run with fewer receive 3089 * XXX buffers instead of just failing. 3090 */ 3091 wm_rxdrain(sc); 3092 goto out; 3093 } 3094 } else 3095 WM_INIT_RXDESC(sc, i); 3096 } 3097 sc->sc_rxptr = 0; 3098 sc->sc_rxdiscard = 0; 3099 WM_RXCHAIN_RESET(sc); 3100 3101 /* 3102 * Clear out the VLAN table -- we don't use it (yet). 3103 */ 3104 CSR_WRITE(sc, WMREG_VET, 0); 3105 for (i = 0; i < WM_VLAN_TABSIZE; i++) 3106 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 3107 3108 /* 3109 * Set up flow-control parameters. 3110 * 3111 * XXX Values could probably stand some tuning. 
3112 */ 3113 if (sc->sc_type != WM_T_ICH8) { 3114 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 3115 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 3116 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 3117 } 3118 3119 sc->sc_fcrtl = FCRTL_DFLT; 3120 if (sc->sc_type < WM_T_82543) { 3121 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 3122 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 3123 } else { 3124 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 3125 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 3126 } 3127 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 3128 3129 #if 0 /* XXXJRT */ 3130 /* Deal with VLAN enables. */ 3131 if (VLAN_ATTACHED(&sc->sc_ethercom)) 3132 sc->sc_ctrl |= CTRL_VME; 3133 else 3134 #endif /* XXXJRT */ 3135 sc->sc_ctrl &= ~CTRL_VME; 3136 3137 /* Write the control registers. */ 3138 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3139 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) { 3140 int val; 3141 val = CSR_READ(sc, WMREG_CTRL_EXT); 3142 val &= ~CTRL_EXT_LINK_MODE_MASK; 3143 CSR_WRITE(sc, WMREG_CTRL_EXT, val); 3144 3145 /* Bypass RX and TX FIFO's */ 3146 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 3147 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | 3148 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 3149 3150 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 3151 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 3152 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 3153 /* 3154 * Set the mac to wait the maximum time between each 3155 * iteration and increase the max iterations when 3156 * polling the phy; this fixes erroneous timeouts at 10Mbps. 3157 */ 3158 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF); 3159 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM); 3160 val |= 0x3F; 3161 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val); 3162 } 3163 #if 0 3164 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 3165 #endif 3166 3167 /* 3168 * Set up checksum offload parameters. 3169 */ 3170 reg = CSR_READ(sc, WMREG_RXCSUM); 3171 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 3172 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 3173 reg |= RXCSUM_IPOFL; 3174 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 3175 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 3176 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 3177 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 3178 CSR_WRITE(sc, WMREG_RXCSUM, reg); 3179 3180 /* 3181 * Set up the interrupt registers. 3182 */ 3183 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3184 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 3185 ICR_RXO | ICR_RXT0; 3186 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 3187 sc->sc_icr |= ICR_RXCFG; 3188 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 3189 3190 /* Set up the inter-packet gap. */ 3191 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 3192 3193 if (sc->sc_type >= WM_T_82543) { 3194 /* 3195 * Set up the interrupt throttling register (units of 256ns) 3196 * Note that a footnote in Intel's documentation says this 3197 * ticker runs at 1/4 the rate when the chip is in 100Mbit 3198 * or 10Mbit mode. Empirically, it appears to be the case 3199 * that that is also true for the 1024ns units of the other 3200 * interrupt-related timer registers -- so, really, we ought 3201 * to divide this value by 4 when the link speed is low. 3202 * 3203 * XXX implement this division at link speed change! 3204 */ 3205 sc->sc_itr = 1000000000 / (1500 * 256); /* 2604 ints/sec */ 3206 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr); 3207 } 3208 3209 #if 0 /* XXXJRT */ 3210 /* Set the VLAN ethernetype. 
*/ 3211 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 3212 #endif 3213 3214 /* 3215 * Set up the transmit control register; we start out with 3216 * a collision distance suitable for FDX, but update it when 3217 * we resolve the media type. 3218 */ 3219 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) | 3220 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 3221 if (sc->sc_type >= WM_T_82571) 3222 sc->sc_tctl |= TCTL_MULR; 3223 if (sc->sc_type >= WM_T_80003) 3224 sc->sc_tctl |= TCTL_RTLC; 3225 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 3226 3227 /* Set the media. */ 3228 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp); 3229 3230 /* 3231 * Set up the receive control register; we actually program 3232 * the register when we set the receive filter. Use multicast 3233 * address offset type 0. 3234 * 3235 * Only the i82544 has the ability to strip the incoming 3236 * CRC, so we don't enable that feature. 3237 */ 3238 sc->sc_mchash_type = 0; 3239 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 3240 | RCTL_MO(sc->sc_mchash_type); 3241 3242 /* 82573 doesn't support jumbo frames */ 3243 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8) 3244 sc->sc_rctl |= RCTL_LPE; 3245 3246 if (MCLBYTES == 2048) { 3247 sc->sc_rctl |= RCTL_2k; 3248 } else { 3249 if (sc->sc_type >= WM_T_82543) { 3250 switch(MCLBYTES) { 3251 case 4096: 3252 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 3253 break; 3254 case 8192: 3255 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 3256 break; 3257 case 16384: 3258 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 3259 break; 3260 default: 3261 panic("wm_init: MCLBYTES %d unsupported", 3262 MCLBYTES); 3263 break; 3264 } 3265 } else panic("wm_init: i82542 requires MCLBYTES = 2048"); 3266 } 3267 3268 /* Set the receive filter. */ 3269 wm_set_filter(sc); 3270 3271 /* Start the one second link check clock. */ 3272 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 3273 3274 /* ...all done! */ 3275 ifp->if_flags |= IFF_RUNNING; 3276 ifp->if_flags &= ~IFF_OACTIVE; 3277 3278 out: 3279 if (error) 3280 log(LOG_ERR, "%s: interface not running\n", 3281 sc->sc_dev.dv_xname); 3282 return (error); 3283 } 3284 3285 /* 3286 * wm_rxdrain: 3287 * 3288 * Drain the receive queue. 3289 */ 3290 static void 3291 wm_rxdrain(struct wm_softc *sc) 3292 { 3293 struct wm_rxsoft *rxs; 3294 int i; 3295 3296 for (i = 0; i < WM_NRXDESC; i++) { 3297 rxs = &sc->sc_rxsoft[i]; 3298 if (rxs->rxs_mbuf != NULL) { 3299 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 3300 m_freem(rxs->rxs_mbuf); 3301 rxs->rxs_mbuf = NULL; 3302 } 3303 } 3304 } 3305 3306 /* 3307 * wm_stop: [ifnet interface function] 3308 * 3309 * Stop transmission on the interface. 3310 */ 3311 static void 3312 wm_stop(struct ifnet *ifp, int disable) 3313 { 3314 struct wm_softc *sc = ifp->if_softc; 3315 struct wm_txsoft *txs; 3316 int i; 3317 3318 /* Stop the one second clock. */ 3319 callout_stop(&sc->sc_tick_ch); 3320 3321 /* Stop the 82547 Tx FIFO stall check timer. */ 3322 if (sc->sc_type == WM_T_82547) 3323 callout_stop(&sc->sc_txfifo_ch); 3324 3325 if (sc->sc_flags & WM_F_HAS_MII) { 3326 /* Down the MII. */ 3327 mii_down(&sc->sc_mii); 3328 } 3329 3330 /* Stop the transmit and receive processes. */ 3331 CSR_WRITE(sc, WMREG_TCTL, 0); 3332 CSR_WRITE(sc, WMREG_RCTL, 0); 3333 3334 /* 3335 * Clear the interrupt mask to ensure the device cannot assert its 3336 * interrupt line. 3337 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service 3338 * any currently pending or shared interrupt.
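 * (wm_intr() tests the ICR against sc_icr and returns immediately
 * when the mask is zero, which matters if the interrupt line is
 * shared with another device.)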
3339 */ 3340 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3341 sc->sc_icr = 0; 3342 3343 /* Release any queued transmit buffers. */ 3344 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 3345 txs = &sc->sc_txsoft[i]; 3346 if (txs->txs_mbuf != NULL) { 3347 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3348 m_freem(txs->txs_mbuf); 3349 txs->txs_mbuf = NULL; 3350 } 3351 } 3352 3353 if (disable) 3354 wm_rxdrain(sc); 3355 3356 /* Mark the interface as down and cancel the watchdog timer. */ 3357 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3358 ifp->if_timer = 0; 3359 } 3360 3361 void 3362 wm_get_auto_rd_done(struct wm_softc *sc) 3363 { 3364 int i; 3365 3366 /* wait for eeprom to reload */ 3367 switch (sc->sc_type) { 3368 case WM_T_82571: 3369 case WM_T_82572: 3370 case WM_T_82573: 3371 case WM_T_80003: 3372 case WM_T_ICH8: 3373 case WM_T_ICH9: 3374 for (i = 10; i > 0; i--) { 3375 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 3376 break; 3377 delay(1000); 3378 } 3379 if (i == 0) { 3380 log(LOG_ERR, "%s: auto read from eeprom failed to " 3381 "complete\n", sc->sc_dev.dv_xname); 3382 } 3383 break; 3384 default: 3385 delay(5000); 3386 break; 3387 } 3388 3389 /* Phy configuration starts after EECD_AUTO_RD is set */ 3390 if (sc->sc_type == WM_T_82573) 3391 delay(25000); 3392 } 3393 3394 /* 3395 * wm_acquire_eeprom: 3396 * 3397 * Perform the EEPROM handshake required on some chips. 3398 */ 3399 static int 3400 wm_acquire_eeprom(struct wm_softc *sc) 3401 { 3402 uint32_t reg; 3403 int x; 3404 int ret = 0; 3405 3406 /* always success */ 3407 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3408 return 0; 3409 3410 if (sc->sc_flags & WM_F_SWFWHW_SYNC) { 3411 ret = wm_get_swfwhw_semaphore(sc); 3412 } else if (sc->sc_flags & WM_F_SWFW_SYNC) { 3413 /* this will also do wm_get_swsm_semaphore() if needed */ 3414 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 3415 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 3416 ret = wm_get_swsm_semaphore(sc); 3417 } 3418 3419 if (ret) 3420 return 1; 3421 3422 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3423 reg = CSR_READ(sc, WMREG_EECD); 3424 3425 /* Request EEPROM access. */ 3426 reg |= EECD_EE_REQ; 3427 CSR_WRITE(sc, WMREG_EECD, reg); 3428 3429 /* ..and wait for it to be granted. */ 3430 for (x = 0; x < 1000; x++) { 3431 reg = CSR_READ(sc, WMREG_EECD); 3432 if (reg & EECD_EE_GNT) 3433 break; 3434 delay(5); 3435 } 3436 if ((reg & EECD_EE_GNT) == 0) { 3437 aprint_error("%s: could not acquire EEPROM GNT\n", 3438 sc->sc_dev.dv_xname); 3439 reg &= ~EECD_EE_REQ; 3440 CSR_WRITE(sc, WMREG_EECD, reg); 3441 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 3442 wm_put_swfwhw_semaphore(sc); 3443 if (sc->sc_flags & WM_F_SWFW_SYNC) 3444 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3445 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3446 wm_put_swsm_semaphore(sc); 3447 return (1); 3448 } 3449 } 3450 3451 return (0); 3452 } 3453 3454 /* 3455 * wm_release_eeprom: 3456 * 3457 * Release the EEPROM mutex. 
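 * Drops the EECD_EE_REQ handshake bit first, if it was used, and
 * then releases whichever semaphore wm_acquire_eeprom() took.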
*/ 3459 static void 3460 wm_release_eeprom(struct wm_softc *sc) 3461 { 3462 uint32_t reg; 3463 3464 /* always success */ 3465 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3466 return; 3467 3468 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3469 reg = CSR_READ(sc, WMREG_EECD); 3470 reg &= ~EECD_EE_REQ; 3471 CSR_WRITE(sc, WMREG_EECD, reg); 3472 } 3473 3474 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 3475 wm_put_swfwhw_semaphore(sc); 3476 if (sc->sc_flags & WM_F_SWFW_SYNC) 3477 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3478 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3479 wm_put_swsm_semaphore(sc); 3480 } 3481 3482 /* 3483 * wm_eeprom_sendbits: 3484 * 3485 * Send a series of bits to the EEPROM. 3486 */ 3487 static void 3488 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 3489 { 3490 uint32_t reg; 3491 int x; 3492 3493 reg = CSR_READ(sc, WMREG_EECD); 3494 3495 for (x = nbits; x > 0; x--) { 3496 if (bits & (1U << (x - 1))) 3497 reg |= EECD_DI; 3498 else 3499 reg &= ~EECD_DI; 3500 CSR_WRITE(sc, WMREG_EECD, reg); 3501 delay(2); 3502 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3503 delay(2); 3504 CSR_WRITE(sc, WMREG_EECD, reg); 3505 delay(2); 3506 } 3507 } 3508 3509 /* 3510 * wm_eeprom_recvbits: 3511 * 3512 * Receive a series of bits from the EEPROM. 3513 */ 3514 static void 3515 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 3516 { 3517 uint32_t reg, val; 3518 int x; 3519 3520 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 3521 3522 val = 0; 3523 for (x = nbits; x > 0; x--) { 3524 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3525 delay(2); 3526 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 3527 val |= (1U << (x - 1)); 3528 CSR_WRITE(sc, WMREG_EECD, reg); 3529 delay(2); 3530 } 3531 *valp = val; 3532 } 3533 3534 /* 3535 * wm_read_eeprom_uwire: 3536 * 3537 * Read a word from the EEPROM using the MicroWire protocol. 3538 */ 3539 static int 3540 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3541 { 3542 uint32_t reg, val; 3543 int i; 3544 3545 for (i = 0; i < wordcnt; i++) { 3546 /* Clear SK and DI. */ 3547 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 3548 CSR_WRITE(sc, WMREG_EECD, reg); 3549 3550 /* Set CHIP SELECT. */ 3551 reg |= EECD_CS; 3552 CSR_WRITE(sc, WMREG_EECD, reg); 3553 delay(2); 3554 3555 /* Shift in the READ command. */ 3556 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 3557 3558 /* Shift in address. */ 3559 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits); 3560 3561 /* Shift out the data. */ 3562 wm_eeprom_recvbits(sc, &val, 16); 3563 data[i] = val & 0xffff; 3564 3565 /* Clear CHIP SELECT. */ 3566 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 3567 CSR_WRITE(sc, WMREG_EECD, reg); 3568 delay(2); 3569 } 3570 3571 return (0); 3572 } 3573 3574 /* 3575 * wm_spi_eeprom_ready: 3576 * 3577 * Wait for a SPI EEPROM to be ready for commands. 3578 */ 3579 static int 3580 wm_spi_eeprom_ready(struct wm_softc *sc) 3581 { 3582 uint32_t val; 3583 int usec; 3584 3585 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { 3586 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); 3587 wm_eeprom_recvbits(sc, &val, 8); 3588 if ((val & SPI_SR_RDY) == 0) 3589 break; 3590 } 3591 if (usec >= SPI_MAX_RETRIES) { 3592 aprint_error("%s: EEPROM failed to become ready\n", 3593 sc->sc_dev.dv_xname); 3594 return (1); 3595 } 3596 return (0); 3597 } 3598 3599 /* 3600 * wm_read_eeprom_spi: 3601 * 3602 * Read a word from the EEPROM using the SPI protocol.
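 * The device shifts each 16-bit word out most-significant byte
 * first, so the two bytes are swapped after the read.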
*/ 3604 static int 3605 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3606 { 3607 uint32_t reg, val; 3608 int i; 3609 uint8_t opc; 3610 3611 /* Clear SK and CS. */ 3612 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); 3613 CSR_WRITE(sc, WMREG_EECD, reg); 3614 delay(2); 3615 3616 if (wm_spi_eeprom_ready(sc)) 3617 return (1); 3618 3619 /* Toggle CS to flush commands. */ 3620 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); 3621 delay(2); 3622 CSR_WRITE(sc, WMREG_EECD, reg); 3623 delay(2); 3624 3625 opc = SPI_OPC_READ; 3626 if (sc->sc_ee_addrbits == 8 && word >= 128) 3627 opc |= SPI_OPC_A8; 3628 3629 wm_eeprom_sendbits(sc, opc, 8); 3630 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits); 3631 3632 for (i = 0; i < wordcnt; i++) { 3633 wm_eeprom_recvbits(sc, &val, 16); 3634 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); 3635 } 3636 3637 /* Raise CS and clear SK. */ 3638 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 3639 CSR_WRITE(sc, WMREG_EECD, reg); 3640 delay(2); 3641 3642 return (0); 3643 } 3644 3645 #define EEPROM_CHECKSUM 0xBABA 3646 #define EEPROM_SIZE 0x0040 3647 3648 /* 3649 * wm_validate_eeprom_checksum 3650 * 3651 * The checksum is defined as the sum of the first 64 (16 bit) words. 3652 */ 3653 static int 3654 wm_validate_eeprom_checksum(struct wm_softc *sc) 3655 { 3656 uint16_t checksum; 3657 uint16_t eeprom_data; 3658 int i; 3659 3660 checksum = 0; 3661 3662 for (i = 0; i < EEPROM_SIZE; i++) { 3663 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 3664 return 1; 3665 checksum += eeprom_data; 3666 } 3667 3668 if (checksum != (uint16_t) EEPROM_CHECKSUM) 3669 return 1; 3670 3671 return 0; 3672 } 3673 3674 /* 3675 * wm_read_eeprom: 3676 * 3677 * Read data from the serial EEPROM. 3678 */ 3679 static int 3680 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3681 { 3682 int rv; 3683 3684 if (sc->sc_flags & WM_F_EEPROM_INVALID) 3685 return 1; 3686 3687 if (wm_acquire_eeprom(sc)) 3688 return 1; 3689 3690 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) 3691 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data); 3692 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 3693 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data); 3694 else if (sc->sc_flags & WM_F_EEPROM_SPI) 3695 rv = wm_read_eeprom_spi(sc, word, wordcnt, data); 3696 else 3697 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data); 3698 3699 wm_release_eeprom(sc); 3700 return rv; 3701 } 3702 3703 static int 3704 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt, 3705 uint16_t *data) 3706 { 3707 int i, eerd = 0; 3708 int error = 0; 3709 3710 for (i = 0; i < wordcnt; i++) { 3711 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 3712 3713 CSR_WRITE(sc, WMREG_EERD, eerd); 3714 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 3715 if (error != 0) 3716 break; 3717 3718 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 3719 } 3720 3721 return error; 3722 } 3723 3724 static int 3725 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 3726 { 3727 uint32_t attempts = 100000; 3728 uint32_t i, reg = 0; 3729 int32_t done = -1; 3730 3731 for (i = 0; i < attempts; i++) { 3732 reg = CSR_READ(sc, rw); 3733 3734 if (reg & EERD_DONE) { 3735 done = 0; 3736 break; 3737 } 3738 delay(5); 3739 } 3740 3741 return done; 3742 } 3743 3744 /* 3745 * wm_add_rxbuf: 3746 * 3747 * Add a receive buffer to the indicated descriptor.
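 * Allocates an mbuf cluster, loads it into the slot's DMA map, and
 * rewrites the hardware descriptor; returns ENOBUFS if the mbuf or
 * cluster allocation fails.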
*/ 3749 static int 3750 wm_add_rxbuf(struct wm_softc *sc, int idx) 3751 { 3752 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx]; 3753 struct mbuf *m; 3754 int error; 3755 3756 MGETHDR(m, M_DONTWAIT, MT_DATA); 3757 if (m == NULL) 3758 return (ENOBUFS); 3759 3760 MCLGET(m, M_DONTWAIT); 3761 if ((m->m_flags & M_EXT) == 0) { 3762 m_freem(m); 3763 return (ENOBUFS); 3764 } 3765 3766 if (rxs->rxs_mbuf != NULL) 3767 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 3768 3769 rxs->rxs_mbuf = m; 3770 3771 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3772 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, 3773 BUS_DMA_READ|BUS_DMA_NOWAIT); 3774 if (error) { 3775 /* XXX XXX XXX */ 3776 printf("%s: unable to load rx DMA map %d, error = %d\n", 3777 sc->sc_dev.dv_xname, idx, error); 3778 panic("wm_add_rxbuf"); 3779 } 3780 3781 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3782 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 3783 3784 WM_INIT_RXDESC(sc, idx); 3785 3786 return (0); 3787 } 3788 3789 /* 3790 * wm_set_ral: 3791 * 3792 * Set an entry in the receive address list. 3793 */ 3794 static void 3795 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3796 { 3797 uint32_t ral_lo, ral_hi; 3798 3799 if (enaddr != NULL) { 3800 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 3801 (enaddr[3] << 24); 3802 ral_hi = enaddr[4] | (enaddr[5] << 8); 3803 ral_hi |= RAL_AV; 3804 } else { 3805 ral_lo = 0; 3806 ral_hi = 0; 3807 } 3808 3809 if (sc->sc_type >= WM_T_82544) { 3810 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 3811 ral_lo); 3812 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 3813 ral_hi); 3814 } else { 3815 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 3816 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 3817 } 3818 } 3819 3820 /* 3821 * wm_mchash: 3822 * 3823 * Compute the hash of the multicast address for the 4096-bit 3824 * multicast filter. 3825 */ 3826 static uint32_t 3827 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3828 { 3829 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3830 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3831 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 3832 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 3833 uint32_t hash; 3834 3835 if (sc->sc_type == WM_T_ICH8) { 3836 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 3837 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 3838 return (hash & 0x3ff); 3839 } 3840 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3841 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3842 3843 return (hash & 0xfff); 3844 } 3845 3846 /* 3847 * wm_set_filter: 3848 * 3849 * Set up the receive filter. 3850 */ 3851 static void 3852 wm_set_filter(struct wm_softc *sc) 3853 { 3854 struct ethercom *ec = &sc->sc_ethercom; 3855 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3856 struct ether_multi *enm; 3857 struct ether_multistep step; 3858 bus_addr_t mta_reg; 3859 uint32_t hash, reg, bit; 3860 int i, size; 3861 3862 if (sc->sc_type >= WM_T_82544) 3863 mta_reg = WMREG_CORDOVA_MTA; 3864 else 3865 mta_reg = WMREG_MTA; 3866 3867 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3868 3869 if (ifp->if_flags & IFF_BROADCAST) 3870 sc->sc_rctl |= RCTL_BAM; 3871 if (ifp->if_flags & IFF_PROMISC) { 3872 sc->sc_rctl |= RCTL_UPE; 3873 goto allmulti; 3874 } 3875 3876 /* 3877 * Set the station address in the first RAL slot, and 3878 * clear the remaining slots.
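 * (wm_set_ral() with a NULL address writes zeros, leaving the slot's
 * address-valid (RAL_AV) bit clear so the hardware ignores it.)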
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if (sc->sc_type == WM_T_ICH8)
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}

/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
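/*
 * The list printed above is also what can be selected from userland,
 * e.g. "ifconfig wm0 media 1000baseSX mediaopt full-duplex" or
 * "ifconfig wm0 media auto".
 */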
/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->sc_tbi_linkup == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}
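/*
 * Example of the result: an autonegotiated full-duplex link with
 * symmetric flow control is reported as ifm_active ==
 * IFM_ETHER | IFM_1000_SX | IFM_FDX | IFM_FLOW |
 * IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE.
 */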
"FDX" : "HDX")); 4100 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 4101 sc->sc_fcrtl &= ~FCRTL_XONE; 4102 if (status & STATUS_FD) 4103 sc->sc_tctl |= 4104 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4105 else 4106 sc->sc_tctl |= 4107 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 4108 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 4109 sc->sc_fcrtl |= FCRTL_XONE; 4110 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 4111 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 4112 WMREG_OLD_FCRTL : WMREG_FCRTL, 4113 sc->sc_fcrtl); 4114 sc->sc_tbi_linkup = 1; 4115 } else { 4116 /* Link is down. */ 4117 DPRINTF(WM_DEBUG_LINK, 4118 ("%s: LINK: set media -> link down\n", 4119 sc->sc_dev.dv_xname)); 4120 sc->sc_tbi_linkup = 0; 4121 } 4122 } else { 4123 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 4124 sc->sc_dev.dv_xname)); 4125 sc->sc_tbi_linkup = 0; 4126 } 4127 4128 wm_tbi_set_linkled(sc); 4129 4130 return (0); 4131 } 4132 4133 /* 4134 * wm_tbi_set_linkled: 4135 * 4136 * Update the link LED on 1000BASE-X devices. 4137 */ 4138 static void 4139 wm_tbi_set_linkled(struct wm_softc *sc) 4140 { 4141 4142 if (sc->sc_tbi_linkup) 4143 sc->sc_ctrl |= CTRL_SWDPIN(0); 4144 else 4145 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 4146 4147 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4148 } 4149 4150 /* 4151 * wm_tbi_check_link: 4152 * 4153 * Check the link on 1000BASE-X devices. 4154 */ 4155 static void 4156 wm_tbi_check_link(struct wm_softc *sc) 4157 { 4158 uint32_t rxcw, ctrl, status; 4159 4160 if (sc->sc_tbi_anstate == 0) 4161 return; 4162 else if (sc->sc_tbi_anstate > 1) { 4163 DPRINTF(WM_DEBUG_LINK, 4164 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname, 4165 sc->sc_tbi_anstate)); 4166 sc->sc_tbi_anstate--; 4167 return; 4168 } 4169 4170 sc->sc_tbi_anstate = 0; 4171 4172 rxcw = CSR_READ(sc, WMREG_RXCW); 4173 ctrl = CSR_READ(sc, WMREG_CTRL); 4174 status = CSR_READ(sc, WMREG_STATUS); 4175 4176 if ((status & STATUS_LU) == 0) { 4177 DPRINTF(WM_DEBUG_LINK, 4178 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname)); 4179 sc->sc_tbi_linkup = 0; 4180 } else { 4181 DPRINTF(WM_DEBUG_LINK, 4182 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname, 4183 (status & STATUS_FD) ? "FDX" : "HDX")); 4184 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 4185 sc->sc_fcrtl &= ~FCRTL_XONE; 4186 if (status & STATUS_FD) 4187 sc->sc_tctl |= 4188 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4189 else 4190 sc->sc_tctl |= 4191 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 4192 if (ctrl & CTRL_TFCE) 4193 sc->sc_fcrtl |= FCRTL_XONE; 4194 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 4195 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 4196 WMREG_OLD_FCRTL : WMREG_FCRTL, 4197 sc->sc_fcrtl); 4198 sc->sc_tbi_linkup = 1; 4199 } 4200 4201 wm_tbi_set_linkled(sc); 4202 } 4203 4204 /* 4205 * wm_gmii_reset: 4206 * 4207 * Reset the PHY. 4208 */ 4209 static void 4210 wm_gmii_reset(struct wm_softc *sc) 4211 { 4212 uint32_t reg; 4213 int func = 0; /* XXX gcc */ 4214 4215 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) { 4216 if (wm_get_swfwhw_semaphore(sc)) 4217 return; 4218 } 4219 if (sc->sc_type == WM_T_80003) { 4220 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1; 4221 if (wm_get_swfw_semaphore(sc, 4222 func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) 4223 return; 4224 } 4225 if (sc->sc_type >= WM_T_82544) { 4226 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 4227 delay(20000); 4228 4229 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4230 delay(20000); 4231 } else { 4232 /* 4233 * With 82543, we need to force speed and duplex on the MAC 4234 * equal to what the PHY speed and duplex configuration is. 
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		if (wm_get_swfwhw_semaphore(sc))
			return;
	}
	if (sc->sc_type == WM_T_80003) {
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		if (wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
			return;
	}
	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the
		 * PHY to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_type == WM_T_80003)
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type >= WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003;
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_flowflags;
}
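/*
 * Note the interplay with wm_gmii_statchg() below: statchg moves the
 * negotiated flow-control bits (IFM_ETH_FMASK) out of mii_media_active
 * and into sc_flowflags, and this function merges them back in, so the
 * pause state still shows up in the media status reported to userland.
 */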
/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;

	if (ifp->if_flags & IFF_UP) {
		sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
		sc->sc_ctrl |= CTRL_SLU;
		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
		    || (sc->sc_type > WM_T_82543)) {
			sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
		} else {
			sc->sc_ctrl &= ~CTRL_ASDE;
			sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
			if (ife->ifm_media & IFM_FDX)
				sc->sc_ctrl |= CTRL_FD;
			switch (IFM_SUBTYPE(ife->ifm_media)) {
			case IFM_10_T:
				sc->sc_ctrl |= CTRL_SPEED_10;
				break;
			case IFM_100_TX:
				sc->sc_ctrl |= CTRL_SPEED_100;
				break;
			case IFM_1000_T:
				sc->sc_ctrl |= CTRL_SPEED_1000;
				break;
			default:
				panic("wm_gmii_mediachange: bad media 0x%x",
				    ife->ifm_media);
			}
		}
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		if (sc->sc_type <= WM_T_82543)
			wm_gmii_reset(sc);
		mii_mediachg(&sc->sc_mii);
	}
	return (0);
}

#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}
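/*
 * Worked example of the read frame built above: phy 1, reg 2
 * (MII_PHYIDR1) yields 2 | (1 << 5) | (MII_COMMAND_READ << 10) |
 * (MII_COMMAND_START << 12) = 0x1822, clocked out MSB-first as the
 * 14 bits 01 10 00001 00010 (start, read opcode, PHY address,
 * register address) after the 32-bit all-ones preamble.
 */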
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on Kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
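/*
 * The GG82563 exposes 32 registers per page; the page number travels
 * in the upper bits of `reg' and is written to the page-select
 * register first, so a reg argument of (page << GG82563_PAGE_SHIFT) | n
 * selects register n of that page.  Registers numbered
 * GG82563_MIN_ALT_REG and above within a page are reached through the
 * alternate page-select register instead.
 */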
/*
 * wm_gmii_i80003_writereg:	[mii interface function]
 *
 *	Write a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on Kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get the flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type >= WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
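/*
 * Example: if autonegotiation resolves to symmetric pause on a
 * full-duplex link, the MII layer reports IFM_FLOW | IFM_ETH_TXPAUSE |
 * IFM_ETH_RXPAUSE, and the code above enables CTRL_TFCE, CTRL_RFCE and
 * FCRTL_XONE accordingly.
 */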
/*
 * wm_kmrn_i80003_readreg:
 *
 *	Read a Kumeran register.
 */
static int
wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);

	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_kmrn_i80003_writereg:
 *
 *	Write a Kumeran register.
 */
static void
wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 & 16. */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, the device uses Flash. */
		if (eecd == 0x03) {
			return 0;
		}
	}
	return 1;
}

static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error("%s: could not acquire EEPROM GNT\n",
		    sc->sc_dev.dv_xname);
		/* Release semaphores. */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}

static void
wm_put_swsm_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}

static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
			if (wm_get_swsm_semaphore(sc))
				return 1;
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return 0;
		}
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
			wm_put_swsm_semaphore(sc);
		delay(5000);
	}
	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    sc->sc_dev.dv_xname, mask, swfw_sync);
	return 1;
}

static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
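/*
 * Usage sketch (not compiled): the get/put pairs above bracket every
 * access to a resource shared with firmware, as the 80003 PHY
 * accessors do:
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM))
		return;		/* firmware kept the resource */
	/* ... touch PHY 0 registers ... */
	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
#endif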
static int
wm_get_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			return 0;
		delay(5000);
	}
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
	    sc->sc_dev.dv_xname, ext_ctrl);
	return 1;
}

static void
wm_put_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
}

/******************************************************************************
 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
 * register.
 *
 * sc - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * words - number of words to read
 * data - word(s) read from the EEPROM
 *****************************************************************************/
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int32_t error = 0;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint16_t word = 0;
	uint16_t i = 0;

	/*
	 * We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	/* The value of bit 22 corresponds to the flash bank we're on. */
	flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;

	/* Adjust the offset if we're on bank 1 (adjusting for word size). */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

	error = wm_get_swfwhw_semaphore(sc);
	if (error)
		return error;

	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence the * 2. */
		act_offset = bank_offset + ((offset + i) * 2);
		error = wm_read_ich8_word(sc, act_offset, &word);
		if (error)
			break;
		data[i] = word;
	}

	wm_put_swfwhw_semaphore(sc);
	return error;
}
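/*
 * Example of the offset arithmetic above: assuming a bank size of
 * 0x1000 words, reading word 0x10 while bank 1 is the valid bank
 * accesses flash byte offset 1 * (0x1000 * 2) + 0x10 * 2 = 0x2020
 * from the flash base.
 */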
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* Check the Flash Descriptor Valid bit in hardware status. */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR and DAEL in hardware status by writing 1s. */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which can then be used as an indication of
	 * whether a cycle is in progress or has been completed.  We
	 * should also have some software semaphore mechanism guarding
	 * FDONE or the cycle-in-progress bit so that accesses to those
	 * bits by two threads are serialized and they don't start a
	 * cycle at the same time.
	 */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start
		 * one.  Begin by setting Flash Cycle Done.
		 */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * The previous cycle ended within the poll window;
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}

/******************************************************************************
 * This function starts a flash cycle and waits for its completion.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 to Flash Cycle Go in Hw Flash Control. */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* Wait till the FDONE bit is set to 1. */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
		error = 0;
	}
	return error;
}
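/*
 * Summary of the HSFSTS bits the two routines above rely on:
 * HSFSTS_FLDVAL means the flash descriptor is valid (nothing works
 * without it); HSFSTS_FLINPRO means a flash cycle is in progress;
 * HSFSTS_DONE means the last cycle finished and is written back before
 * starting a new one; HSFSTS_ERR and HSFSTS_DAEL are error flags,
 * cleared by writing 1s.
 */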
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t *data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* A 0 or 1 in BCOUNT corresponds to a 1- or 2-byte transfer. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into the Flash Linear
		 * address field in Flash Address.
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * If FCERR is set to 1, clear it and try the whole sequence
		 * a few more times; otherwise read in (shift in) the Flash
		 * Data0, least significant byte first.
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1) {
				*data = (uint8_t)(flash_data & 0x000000FF);
			} else if (size == 2) {
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			}
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another
			 * try... ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0) {
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}

#if 0
/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
{
	int32_t status;
	uint16_t word = 0;

	status = wm_read_ich8_data(sc, index, 1, &word);
	if (status == 0) {
		*data = (uint8_t)word;
	}

	return status;
}
#endif

/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}
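/*
 * Usage sketch (not compiled): read NVM word 3 of the active bank.
 * The index is a byte offset relative to the bank (bank_offset as
 * computed in wm_read_eeprom_ich8() above), and the caller must hold
 * the software flag for the duration of the access.
 */
#if 0
	uint16_t word;

	if (wm_get_swfwhw_semaphore(sc) == 0) {
		if (wm_read_ich8_word(sc, bank_offset + 3 * 2, &word) == 0)
			printf("NVM word 3 = 0x%04x\n", word);
		wm_put_swfwhw_semaphore(sc);
	}
#endif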