/*	$NetBSD: if_wm.c,v 1.154 2008/02/07 01:21:58 dyoung Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.154 2008/02/07 01:21:58 dyoung Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
} wm_chip_type;
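
/*
 * Editor's note (illustrative sketch, not used by the driver): because
 * both ring sizes are powers of two, the WM_NEXTTX()/WM_NEXTRX() macros
 * above can wrap an index with a mask instead of a modulo.  For example,
 * with WM_NTXDESC(sc) == 256:
 *
 *	next = (255 + 1) & WM_NTXDESC_MASK(sc);	 // (256 & 255) == 0
 *
 * which is why sc_ntxdesc and sc_txnum below must be powers of two.
 */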
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
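
/*
 * Worked example for WM_CDTXSYNC() above (editor's illustration only):
 * with WM_NTXDESC(sc) == 256, syncing 4 descriptors starting at index
 * 254 wraps the ring, so the macro issues two bus_dmamap_sync() calls:
 * one covering descriptors 254-255 and a second covering descriptors
 * 0-1 after __x is reset to the start of the ring.
 */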

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
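
/*
 * Worked example for the alignment "scoot" above (editor's illustration
 * only): the 14-byte Ethernet header would leave the IP header 2-byte
 * aligned, so starting the buffer at ext_buf + 2 puts the IP header on
 * a 4-byte boundary:
 *
 *	ext_buf + 2 + ETHER_HDR_LEN == ext_buf + 16
 *
 * On strict-alignment CPUs this avoids unaligned-access faults when the
 * stack reads 32-bit fields in the IP header.
 */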

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
(SERDES)", 691 WM_T_82546_3, WMP_F_SERDES }, 692 #endif 693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 694 "i82546GB quad-port Gigabit Ethernet", 695 WM_T_82546_3, WMP_F_1000T }, 696 697 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 698 "i82546GB quad-port Gigabit Ethernet (KSP3)", 699 WM_T_82546_3, WMP_F_1000T }, 700 701 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 702 "Intel PRO/1000MT (82546GB)", 703 WM_T_82546_3, WMP_F_1000T }, 704 705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 706 "Intel i82541EI 1000BASE-T Ethernet", 707 WM_T_82541, WMP_F_1000T }, 708 709 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 710 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 711 WM_T_82541, WMP_F_1000T }, 712 713 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 714 "Intel i82541EI Mobile 1000BASE-T Ethernet", 715 WM_T_82541, WMP_F_1000T }, 716 717 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 718 "Intel i82541ER 1000BASE-T Ethernet", 719 WM_T_82541_2, WMP_F_1000T }, 720 721 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 722 "Intel i82541GI 1000BASE-T Ethernet", 723 WM_T_82541_2, WMP_F_1000T }, 724 725 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 726 "Intel i82541GI Mobile 1000BASE-T Ethernet", 727 WM_T_82541_2, WMP_F_1000T }, 728 729 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 730 "Intel i82541PI 1000BASE-T Ethernet", 731 WM_T_82541_2, WMP_F_1000T }, 732 733 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 734 "Intel i82547EI 1000BASE-T Ethernet", 735 WM_T_82547, WMP_F_1000T }, 736 737 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 738 "Intel i82547EI Mobile 1000BASE-T Ethernet", 739 WM_T_82547, WMP_F_1000T }, 740 741 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 742 "Intel i82547GI 1000BASE-T Ethernet", 743 WM_T_82547_2, WMP_F_1000T }, 744 745 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 746 "Intel PRO/1000 PT (82571EB)", 747 WM_T_82571, WMP_F_1000T }, 748 749 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 750 "Intel PRO/1000 PF (82571EB)", 751 WM_T_82571, WMP_F_1000X }, 752 #if 0 753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 754 "Intel PRO/1000 PB (82571EB)", 755 WM_T_82571, WMP_F_SERDES }, 756 #endif 757 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 758 "Intel PRO/1000 QT (82571EB)", 759 WM_T_82571, WMP_F_1000T }, 760 761 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 762 "Intel i82572EI 1000baseT Ethernet", 763 WM_T_82572, WMP_F_1000T }, 764 765 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 766 "Intel� PRO/1000 PT Quad Port Server Adapter", 767 WM_T_82571, WMP_F_1000T, }, 768 769 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 770 "Intel i82572EI 1000baseX Ethernet", 771 WM_T_82572, WMP_F_1000X }, 772 #if 0 773 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 774 "Intel i82572EI Gigabit Ethernet (SERDES)", 775 WM_T_82572, WMP_F_SERDES }, 776 #endif 777 778 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 779 "Intel i82572EI 1000baseT Ethernet", 780 WM_T_82572, WMP_F_1000T }, 781 782 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 783 "Intel i82573E", 784 WM_T_82573, WMP_F_1000T }, 785 786 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 787 "Intel i82573E IAMT", 788 WM_T_82573, WMP_F_1000T }, 789 790 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 791 "Intel i82573L Gigabit Ethernet", 792 WM_T_82573, WMP_F_1000T }, 793 794 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 795 "i80003 dual 1000baseT Ethernet", 796 WM_T_80003, WMP_F_1000T }, 797 798 { 

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}
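
	/*
	 * Editor's note on the 4G constraint above (illustrative): the
	 * chip holds the upper 32 bits of a descriptor ring's base
	 * address in a separate register half, so every descriptor in a
	 * ring must share those upper 32 bits.  Passing 0x100000000ULL
	 * as the boundary to bus_dmamem_alloc() guarantees the clump
	 * never crosses a 4G line, satisfying that for both rings at
	 * once.
	 */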

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			printf("%s: can't map FLASH registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
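
	/*
	 * Editor's note (illustrative): sc_ee_addrbits determines the
	 * EEPROM size reported below, since a device with N address
	 * bits can address 1U << N words; e.g. 6 bits -> 64 words and
	 * 8 bits -> 256 words (MicroWire), 16 bits -> 65536 words (SPI).
	 */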

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));
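
	/*
	 * Worked example for the unpacking above (editor's illustration
	 * only): the EEPROM stores the MAC as three little-endian 16-bit
	 * words, so myea[0] == 0x2301, myea[1] == 0x6745, myea[2] ==
	 * 0xab89 unpacks to the address 01:23:45:67:89:ab.
	 */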

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_82573) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
		    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
1754 */ 1755 struct tcphdr *th; 1756 1757 if (v4) { 1758 struct ip *ip = 1759 (void *)(mtod(m0, char *) + offset); 1760 th = (void *)(mtod(m0, char *) + hlen); 1761 1762 ip->ip_len = 0; 1763 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 1764 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1765 } else { 1766 struct ip6_hdr *ip6 = 1767 (void *)(mtod(m0, char *) + offset); 1768 th = (void *)(mtod(m0, char *) + hlen); 1769 1770 ip6->ip6_plen = 0; 1771 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 1772 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 1773 } 1774 hlen += th->th_off << 2; 1775 } 1776 1777 if (v4) { 1778 WM_EVCNT_INCR(&sc->sc_ev_txtso); 1779 cmdlen |= WTX_TCPIP_CMD_IP; 1780 } else { 1781 WM_EVCNT_INCR(&sc->sc_ev_txtso6); 1782 ipcse = 0; 1783 } 1784 cmd |= WTX_TCPIP_CMD_TSE; 1785 cmdlen |= WTX_TCPIP_CMD_TSE | 1786 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 1787 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 1788 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 1789 } 1790 1791 /* 1792 * NOTE: Even if we're not using the IP or TCP/UDP checksum 1793 * offload feature, if we load the context descriptor, we 1794 * MUST provide valid values for IPCSS and TUCSS fields. 1795 */ 1796 1797 ipcs = WTX_TCPIP_IPCSS(offset) | 1798 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 1799 WTX_TCPIP_IPCSE(ipcse); 1800 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) { 1801 WM_EVCNT_INCR(&sc->sc_ev_txipsum); 1802 fields |= WTX_IXSM; 1803 } 1804 1805 offset += iphl; 1806 1807 if (m0->m_pkthdr.csum_flags & 1808 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) { 1809 WM_EVCNT_INCR(&sc->sc_ev_txtusum); 1810 fields |= WTX_TXSM; 1811 tucs = WTX_TCPIP_TUCSS(offset) | 1812 WTX_TCPIP_TUCSO(offset + 1813 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 1814 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1815 } else if ((m0->m_pkthdr.csum_flags & 1816 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) { 1817 WM_EVCNT_INCR(&sc->sc_ev_txtusum6); 1818 fields |= WTX_TXSM; 1819 tucs = WTX_TCPIP_TUCSS(offset) | 1820 WTX_TCPIP_TUCSO(offset + 1821 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 1822 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1823 } else { 1824 /* Just initialize it to a valid TCP context. */ 1825 tucs = WTX_TCPIP_TUCSS(offset) | 1826 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 1827 WTX_TCPIP_TUCSE(0) /* rest of packet */; 1828 } 1829 1830 /* Fill in the context descriptor. */ 1831 t = (struct livengood_tcpip_ctxdesc *) 1832 &sc->sc_txdescs[sc->sc_txnext]; 1833 t->tcpip_ipcs = htole32(ipcs); 1834 t->tcpip_tucs = htole32(tucs); 1835 t->tcpip_cmdlen = htole32(cmdlen); 1836 t->tcpip_seg = htole32(seg); 1837 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE); 1838 1839 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext); 1840 txs->txs_ndesc++; 1841 1842 *cmdp = cmd; 1843 *fieldsp = fields; 1844 1845 return (0); 1846 } 1847 1848 static void 1849 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 1850 { 1851 struct mbuf *m; 1852 int i; 1853 1854 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname); 1855 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 1856 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 1857 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname, 1858 m->m_data, m->m_len, m->m_flags); 1859 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname, 1860 i, i == 1 ? "" : "s"); 1861 } 1862 1863 /* 1864 * wm_82547_txfifo_stall: 1865 * 1866 * Callout used to wait for the 82547 Tx FIFO to drain, 1867 * reset the FIFO pointers, and restart packet transmission. 
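 *
 *	The FIFO is considered drained once the descriptor ring is
 *	empty (TDT == TDH) and the chip's internal FIFO head and tail
 *	pointers (TDFH/TDFT, TDFHS/TDFTS) agree; until then, this
 *	callout simply re-arms itself one tick later.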
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}

/*
 * wm_82547_txfifo_bugchk:
 *
 *	Check for bug condition in the 82547 Tx FIFO.  We need to
 *	prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
 *
 *	We do this by checking the amount of space before the end
 *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
 *	the Tx FIFO, wait for all remaining packets to drain, reset
 *	the internal FIFO pointers to the beginning, and restart
 *	transmission on the interface.
 */
#define	WM_FIFO_HDR		0x10
#define	WM_82547_PAD_LEN	0x3e0
static int
wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
{
	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);

	/* Just return if already stalled. */
	if (sc->sc_txfifo_stall)
		return (1);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		/* Stall only occurs in half-duplex mode. */
		goto send_packet;
	}

	if (len >= WM_82547_PAD_LEN + space) {
		sc->sc_txfifo_stall = 1;
		callout_schedule(&sc->sc_txfifo_ch, 1);
		return (1);
	}

 send_packet:
	sc->sc_txfifo_head += len;
	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
		sc->sc_txfifo_head -= sc->sc_txfifo_size;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/*
		 * Grab a packet off the queue.
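		 * Note that we only peek at the packet here; it is not
		 * dequeued until we know for certain that it can be
		 * transmitted, so on any failure below it simply stays
		 * on the send queue for a later attempt.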
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
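		 * As a worked example (illustrative numbers only): a
		 * 1514-byte frame occupies len = roundup(1514 + WM_FIFO_HDR,
		 * WM_FIFO_HDR) = 0x600 bytes of FIFO space, so the stall is
		 * triggered once no more than len - WM_82547_PAD_LEN = 0x220
		 * bytes remain before the end of the FIFO.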
2095 */ 2096 if (sc->sc_type == WM_T_82547 && 2097 wm_82547_txfifo_bugchk(sc, m0)) { 2098 DPRINTF(WM_DEBUG_TX, 2099 ("%s: TX: 82547 Tx FIFO bug detected\n", 2100 sc->sc_dev.dv_xname)); 2101 ifp->if_flags |= IFF_OACTIVE; 2102 bus_dmamap_unload(sc->sc_dmat, dmamap); 2103 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall); 2104 break; 2105 } 2106 2107 IFQ_DEQUEUE(&ifp->if_snd, m0); 2108 2109 /* 2110 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 2111 */ 2112 2113 DPRINTF(WM_DEBUG_TX, 2114 ("%s: TX: packet has %d (%d) DMA segments\n", 2115 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed)); 2116 2117 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]); 2118 2119 /* 2120 * Store a pointer to the packet so that we can free it 2121 * later. 2122 * 2123 * Initially, we consider the number of descriptors the 2124 * packet uses the number of DMA segments. This may be 2125 * incremented by 1 if we do checksum offload (a descriptor 2126 * is used to set the checksum context). 2127 */ 2128 txs->txs_mbuf = m0; 2129 txs->txs_firstdesc = sc->sc_txnext; 2130 txs->txs_ndesc = segs_needed; 2131 2132 /* Set up offload parameters for this packet. */ 2133 if (m0->m_pkthdr.csum_flags & 2134 (M_CSUM_TSOv4|M_CSUM_TSOv6| 2135 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4| 2136 M_CSUM_TCPv6|M_CSUM_UDPv6)) { 2137 if (wm_tx_offload(sc, txs, &cksumcmd, 2138 &cksumfields) != 0) { 2139 /* Error message already displayed. */ 2140 bus_dmamap_unload(sc->sc_dmat, dmamap); 2141 continue; 2142 } 2143 } else { 2144 cksumcmd = 0; 2145 cksumfields = 0; 2146 } 2147 2148 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 2149 2150 /* Sync the DMA map. */ 2151 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 2152 BUS_DMASYNC_PREWRITE); 2153 2154 /* 2155 * Initialize the transmit descriptor. 2156 */ 2157 for (nexttx = sc->sc_txnext, seg = 0; 2158 seg < dmamap->dm_nsegs; seg++) { 2159 for (seglen = dmamap->dm_segs[seg].ds_len, 2160 curaddr = dmamap->dm_segs[seg].ds_addr; 2161 seglen != 0; 2162 curaddr += curlen, seglen -= curlen, 2163 nexttx = WM_NEXTTX(sc, nexttx)) { 2164 curlen = seglen; 2165 2166 /* 2167 * So says the Linux driver: 2168 * Work around for premature descriptor 2169 * write-backs in TSO mode. Append a 2170 * 4-byte sentinel descriptor. 2171 */ 2172 if (use_tso && 2173 seg == dmamap->dm_nsegs - 1 && 2174 curlen > 8) 2175 curlen -= 4; 2176 2177 wm_set_dma_addr( 2178 &sc->sc_txdescs[nexttx].wtx_addr, 2179 curaddr); 2180 sc->sc_txdescs[nexttx].wtx_cmdlen = 2181 htole32(cksumcmd | curlen); 2182 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 2183 0; 2184 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 2185 cksumfields; 2186 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0; 2187 lasttx = nexttx; 2188 2189 DPRINTF(WM_DEBUG_TX, 2190 ("%s: TX: desc %d: low 0x%08lx, " 2191 "len 0x%04x\n", 2192 sc->sc_dev.dv_xname, nexttx, 2193 curaddr & 0xffffffffUL, (unsigned)curlen)); 2194 } 2195 } 2196 2197 KASSERT(lasttx != -1); 2198 2199 /* 2200 * Set up the command byte on the last descriptor of 2201 * the packet. If we're in the interrupt delay window, 2202 * delay the interrupt. 2203 */ 2204 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2205 htole32(WTX_CMD_EOP | WTX_CMD_RS); 2206 2207 #if 0 /* XXXJRT */ 2208 /* 2209 * If VLANs are enabled and the packet has a VLAN tag, set 2210 * up the descriptor to encapsulate the packet for us. 2211 * 2212 * This is only valid on the last descriptor of the packet. 
2213 */ 2214 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) { 2215 sc->sc_txdescs[lasttx].wtx_cmdlen |= 2216 htole32(WTX_CMD_VLE); 2217 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan 2218 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff); 2219 } 2220 #endif /* XXXJRT */ 2221 2222 txs->txs_lastdesc = lasttx; 2223 2224 DPRINTF(WM_DEBUG_TX, 2225 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname, 2226 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen))); 2227 2228 /* Sync the descriptors we're using. */ 2229 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc, 2230 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2231 2232 /* Give the packet to the chip. */ 2233 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx); 2234 2235 DPRINTF(WM_DEBUG_TX, 2236 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx)); 2237 2238 DPRINTF(WM_DEBUG_TX, 2239 ("%s: TX: finished transmitting packet, job %d\n", 2240 sc->sc_dev.dv_xname, sc->sc_txsnext)); 2241 2242 /* Advance the tx pointer. */ 2243 sc->sc_txfree -= txs->txs_ndesc; 2244 sc->sc_txnext = nexttx; 2245 2246 sc->sc_txsfree--; 2247 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext); 2248 2249 #if NBPFILTER > 0 2250 /* Pass the packet to any BPF listeners. */ 2251 if (ifp->if_bpf) 2252 bpf_mtap(ifp->if_bpf, m0); 2253 #endif /* NBPFILTER > 0 */ 2254 } 2255 2256 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) { 2257 /* No more slots; notify upper layer. */ 2258 ifp->if_flags |= IFF_OACTIVE; 2259 } 2260 2261 if (sc->sc_txfree != ofree) { 2262 /* Set a watchdog timer in case the chip flakes out. */ 2263 ifp->if_timer = 5; 2264 } 2265 } 2266 2267 /* 2268 * wm_watchdog: [ifnet interface function] 2269 * 2270 * Watchdog timer handler. 2271 */ 2272 static void 2273 wm_watchdog(struct ifnet *ifp) 2274 { 2275 struct wm_softc *sc = ifp->if_softc; 2276 2277 /* 2278 * Since we're using delayed interrupts, sweep up 2279 * before we report an error. 2280 */ 2281 wm_txintr(sc); 2282 2283 if (sc->sc_txfree != WM_NTXDESC(sc)) { 2284 log(LOG_ERR, 2285 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 2286 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree, 2287 sc->sc_txnext); 2288 ifp->if_oerrors++; 2289 2290 /* Reset the interface. */ 2291 (void) wm_init(ifp); 2292 } 2293 2294 /* Try to get more packets going. */ 2295 wm_start(ifp); 2296 } 2297 2298 /* 2299 * wm_ioctl: [ifnet interface function] 2300 * 2301 * Handle control requests from the operator. 2302 */ 2303 static int 2304 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 2305 { 2306 struct wm_softc *sc = ifp->if_softc; 2307 struct ifreq *ifr = (struct ifreq *) data; 2308 int s, error; 2309 2310 s = splnet(); 2311 2312 switch (cmd) { 2313 case SIOCSIFMEDIA: 2314 case SIOCGIFMEDIA: 2315 /* Flow control requires full-duplex mode. */ 2316 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 2317 (ifr->ifr_media & IFM_FDX) == 0) 2318 ifr->ifr_media &= ~IFM_ETH_FMASK; 2319 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 2320 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 2321 /* We can do both TXPAUSE and RXPAUSE. 
*/ 2322 ifr->ifr_media |= 2323 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 2324 } 2325 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 2326 } 2327 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 2328 break; 2329 default: 2330 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 2331 break; 2332 2333 error = 0; 2334 2335 if (cmd == SIOCSIFCAP) 2336 error = (*ifp->if_init)(ifp); 2337 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 2338 ; 2339 else if (ifp->if_flags & IFF_RUNNING) { 2340 /* 2341 * Multicast list has changed; set the hardware filter 2342 * accordingly. 2343 */ 2344 wm_set_filter(sc); 2345 } 2346 break; 2347 } 2348 2349 /* Try to get more packets going. */ 2350 wm_start(ifp); 2351 2352 splx(s); 2353 return (error); 2354 } 2355 2356 /* 2357 * wm_intr: 2358 * 2359 * Interrupt service routine. 2360 */ 2361 static int 2362 wm_intr(void *arg) 2363 { 2364 struct wm_softc *sc = arg; 2365 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2366 uint32_t icr; 2367 int handled = 0; 2368 2369 while (1 /* CONSTCOND */) { 2370 icr = CSR_READ(sc, WMREG_ICR); 2371 if ((icr & sc->sc_icr) == 0) 2372 break; 2373 #if 0 /*NRND > 0*/ 2374 if (RND_ENABLED(&sc->rnd_source)) 2375 rnd_add_uint32(&sc->rnd_source, icr); 2376 #endif 2377 2378 handled = 1; 2379 2380 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2381 if (icr & (ICR_RXDMT0|ICR_RXT0)) { 2382 DPRINTF(WM_DEBUG_RX, 2383 ("%s: RX: got Rx intr 0x%08x\n", 2384 sc->sc_dev.dv_xname, 2385 icr & (ICR_RXDMT0|ICR_RXT0))); 2386 WM_EVCNT_INCR(&sc->sc_ev_rxintr); 2387 } 2388 #endif 2389 wm_rxintr(sc); 2390 2391 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 2392 if (icr & ICR_TXDW) { 2393 DPRINTF(WM_DEBUG_TX, 2394 ("%s: TX: got TXDW interrupt\n", 2395 sc->sc_dev.dv_xname)); 2396 WM_EVCNT_INCR(&sc->sc_ev_txdw); 2397 } 2398 #endif 2399 wm_txintr(sc); 2400 2401 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) { 2402 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 2403 wm_linkintr(sc, icr); 2404 } 2405 2406 if (icr & ICR_RXO) { 2407 ifp->if_ierrors++; 2408 #if defined(WM_DEBUG) 2409 log(LOG_WARNING, "%s: Receive overrun\n", 2410 sc->sc_dev.dv_xname); 2411 #endif /* defined(WM_DEBUG) */ 2412 } 2413 } 2414 2415 if (handled) { 2416 /* Try to get more packets going. */ 2417 wm_start(ifp); 2418 } 2419 2420 return (handled); 2421 } 2422 2423 /* 2424 * wm_txintr: 2425 * 2426 * Helper; handle transmit interrupts. 2427 */ 2428 static void 2429 wm_txintr(struct wm_softc *sc) 2430 { 2431 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2432 struct wm_txsoft *txs; 2433 uint8_t status; 2434 int i; 2435 2436 ifp->if_flags &= ~IFF_OACTIVE; 2437 2438 /* 2439 * Go through the Tx list and free mbufs for those 2440 * frames which have been transmitted. 
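	 * A job is finished when the chip has set the descriptor-done
	 * bit (WTX_ST_DD) in the status field of the job's last
	 * descriptor.  Jobs are retired strictly in order, starting at
	 * sc_txsdirty, and the sweep stops at the first job that is
	 * still pending.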
2441 */ 2442 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc); 2443 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) { 2444 txs = &sc->sc_txsoft[i]; 2445 2446 DPRINTF(WM_DEBUG_TX, 2447 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i)); 2448 2449 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc, 2450 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2451 2452 status = 2453 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status; 2454 if ((status & WTX_ST_DD) == 0) { 2455 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1, 2456 BUS_DMASYNC_PREREAD); 2457 break; 2458 } 2459 2460 DPRINTF(WM_DEBUG_TX, 2461 ("%s: TX: job %d done: descs %d..%d\n", 2462 sc->sc_dev.dv_xname, i, txs->txs_firstdesc, 2463 txs->txs_lastdesc)); 2464 2465 /* 2466 * XXX We should probably be using the statistics 2467 * XXX registers, but I don't know if they exist 2468 * XXX on chips before the i82544. 2469 */ 2470 2471 #ifdef WM_EVENT_COUNTERS 2472 if (status & WTX_ST_TU) 2473 WM_EVCNT_INCR(&sc->sc_ev_tu); 2474 #endif /* WM_EVENT_COUNTERS */ 2475 2476 if (status & (WTX_ST_EC|WTX_ST_LC)) { 2477 ifp->if_oerrors++; 2478 if (status & WTX_ST_LC) 2479 log(LOG_WARNING, "%s: late collision\n", 2480 sc->sc_dev.dv_xname); 2481 else if (status & WTX_ST_EC) { 2482 ifp->if_collisions += 16; 2483 log(LOG_WARNING, "%s: excessive collisions\n", 2484 sc->sc_dev.dv_xname); 2485 } 2486 } else 2487 ifp->if_opackets++; 2488 2489 sc->sc_txfree += txs->txs_ndesc; 2490 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 2491 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2492 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2493 m_freem(txs->txs_mbuf); 2494 txs->txs_mbuf = NULL; 2495 } 2496 2497 /* Update the dirty transmit buffer pointer. */ 2498 sc->sc_txsdirty = i; 2499 DPRINTF(WM_DEBUG_TX, 2500 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i)); 2501 2502 /* 2503 * If there are no more pending transmissions, cancel the watchdog 2504 * timer. 2505 */ 2506 if (sc->sc_txsfree == WM_TXQUEUELEN(sc)) 2507 ifp->if_timer = 0; 2508 } 2509 2510 /* 2511 * wm_rxintr: 2512 * 2513 * Helper; handle receive interrupts. 2514 */ 2515 static void 2516 wm_rxintr(struct wm_softc *sc) 2517 { 2518 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2519 struct wm_rxsoft *rxs; 2520 struct mbuf *m; 2521 int i, len; 2522 uint8_t status, errors; 2523 2524 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) { 2525 rxs = &sc->sc_rxsoft[i]; 2526 2527 DPRINTF(WM_DEBUG_RX, 2528 ("%s: RX: checking descriptor %d\n", 2529 sc->sc_dev.dv_xname, i)); 2530 2531 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2532 2533 status = sc->sc_rxdescs[i].wrx_status; 2534 errors = sc->sc_rxdescs[i].wrx_errors; 2535 len = le16toh(sc->sc_rxdescs[i].wrx_len); 2536 2537 if ((status & WRX_ST_DD) == 0) { 2538 /* 2539 * We have processed all of the receive descriptors. 2540 */ 2541 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD); 2542 break; 2543 } 2544 2545 if (__predict_false(sc->sc_rxdiscard)) { 2546 DPRINTF(WM_DEBUG_RX, 2547 ("%s: RX: discarding contents of descriptor %d\n", 2548 sc->sc_dev.dv_xname, i)); 2549 WM_INIT_RXDESC(sc, i); 2550 if (status & WRX_ST_EOP) { 2551 /* Reset our state. */ 2552 DPRINTF(WM_DEBUG_RX, 2553 ("%s: RX: resetting rxdiscard -> 0\n", 2554 sc->sc_dev.dv_xname)); 2555 sc->sc_rxdiscard = 0; 2556 } 2557 continue; 2558 } 2559 2560 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2561 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2562 2563 m = rxs->rxs_mbuf; 2564 2565 /* 2566 * Add a new receive buffer to the ring, unless of 2567 * course the length is zero. 
Treat the latter as a 2568 * failed mapping. 2569 */ 2570 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) { 2571 /* 2572 * Failed, throw away what we've done so 2573 * far, and discard the rest of the packet. 2574 */ 2575 ifp->if_ierrors++; 2576 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2577 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2578 WM_INIT_RXDESC(sc, i); 2579 if ((status & WRX_ST_EOP) == 0) 2580 sc->sc_rxdiscard = 1; 2581 if (sc->sc_rxhead != NULL) 2582 m_freem(sc->sc_rxhead); 2583 WM_RXCHAIN_RESET(sc); 2584 DPRINTF(WM_DEBUG_RX, 2585 ("%s: RX: Rx buffer allocation failed, " 2586 "dropping packet%s\n", sc->sc_dev.dv_xname, 2587 sc->sc_rxdiscard ? " (discard)" : "")); 2588 continue; 2589 } 2590 2591 WM_RXCHAIN_LINK(sc, m); 2592 2593 m->m_len = len; 2594 2595 DPRINTF(WM_DEBUG_RX, 2596 ("%s: RX: buffer at %p len %d\n", 2597 sc->sc_dev.dv_xname, m->m_data, len)); 2598 2599 /* 2600 * If this is not the end of the packet, keep 2601 * looking. 2602 */ 2603 if ((status & WRX_ST_EOP) == 0) { 2604 sc->sc_rxlen += len; 2605 DPRINTF(WM_DEBUG_RX, 2606 ("%s: RX: not yet EOP, rxlen -> %d\n", 2607 sc->sc_dev.dv_xname, sc->sc_rxlen)); 2608 continue; 2609 } 2610 2611 /* 2612 * Okay, we have the entire packet now. The chip is 2613 * configured to include the FCS (not all chips can 2614 * be configured to strip it), so we need to trim it. 2615 */ 2616 m->m_len -= ETHER_CRC_LEN; 2617 2618 *sc->sc_rxtailp = NULL; 2619 len = m->m_len + sc->sc_rxlen; 2620 m = sc->sc_rxhead; 2621 2622 WM_RXCHAIN_RESET(sc); 2623 2624 DPRINTF(WM_DEBUG_RX, 2625 ("%s: RX: have entire packet, len -> %d\n", 2626 sc->sc_dev.dv_xname, len)); 2627 2628 /* 2629 * If an error occurred, update stats and drop the packet. 2630 */ 2631 if (errors & 2632 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) { 2633 ifp->if_ierrors++; 2634 if (errors & WRX_ER_SE) 2635 log(LOG_WARNING, "%s: symbol error\n", 2636 sc->sc_dev.dv_xname); 2637 else if (errors & WRX_ER_SEQ) 2638 log(LOG_WARNING, "%s: receive sequence error\n", 2639 sc->sc_dev.dv_xname); 2640 else if (errors & WRX_ER_CE) 2641 log(LOG_WARNING, "%s: CRC error\n", 2642 sc->sc_dev.dv_xname); 2643 m_freem(m); 2644 continue; 2645 } 2646 2647 /* 2648 * No errors. Receive the packet. 2649 */ 2650 m->m_pkthdr.rcvif = ifp; 2651 m->m_pkthdr.len = len; 2652 2653 #if 0 /* XXXJRT */ 2654 /* 2655 * If VLANs are enabled, VLAN packets have been unwrapped 2656 * for us. Associate the tag with the packet. 2657 */ 2658 if ((status & WRX_ST_VP) != 0) { 2659 VLAN_INPUT_TAG(ifp, m, 2660 le16toh(sc->sc_rxdescs[i].wrx_special, 2661 continue); 2662 } 2663 #endif /* XXXJRT */ 2664 2665 /* 2666 * Set up checksum info for this packet. 2667 */ 2668 if ((status & WRX_ST_IXSM) == 0) { 2669 if (status & WRX_ST_IPCS) { 2670 WM_EVCNT_INCR(&sc->sc_ev_rxipsum); 2671 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2672 if (errors & WRX_ER_IPE) 2673 m->m_pkthdr.csum_flags |= 2674 M_CSUM_IPv4_BAD; 2675 } 2676 if (status & WRX_ST_TCPCS) { 2677 /* 2678 * Note: we don't know if this was TCP or UDP, 2679 * so we just set both bits, and expect the 2680 * upper layers to deal. 2681 */ 2682 WM_EVCNT_INCR(&sc->sc_ev_rxtusum); 2683 m->m_pkthdr.csum_flags |= 2684 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 2685 M_CSUM_TCPv6 | M_CSUM_UDPv6; 2686 if (errors & WRX_ER_TCPE) 2687 m->m_pkthdr.csum_flags |= 2688 M_CSUM_TCP_UDP_BAD; 2689 } 2690 } 2691 2692 ifp->if_ipackets++; 2693 2694 #if NBPFILTER > 0 2695 /* Pass this up to any BPF listeners. 
*/ 2696 if (ifp->if_bpf) 2697 bpf_mtap(ifp->if_bpf, m); 2698 #endif /* NBPFILTER > 0 */ 2699 2700 /* Pass it on. */ 2701 (*ifp->if_input)(ifp, m); 2702 } 2703 2704 /* Update the receive pointer. */ 2705 sc->sc_rxptr = i; 2706 2707 DPRINTF(WM_DEBUG_RX, 2708 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i)); 2709 } 2710 2711 /* 2712 * wm_linkintr: 2713 * 2714 * Helper; handle link interrupts. 2715 */ 2716 static void 2717 wm_linkintr(struct wm_softc *sc, uint32_t icr) 2718 { 2719 uint32_t status; 2720 2721 /* 2722 * If we get a link status interrupt on a 1000BASE-T 2723 * device, just fall into the normal MII tick path. 2724 */ 2725 if (sc->sc_flags & WM_F_HAS_MII) { 2726 if (icr & ICR_LSC) { 2727 DPRINTF(WM_DEBUG_LINK, 2728 ("%s: LINK: LSC -> mii_tick\n", 2729 sc->sc_dev.dv_xname)); 2730 mii_tick(&sc->sc_mii); 2731 } else if (icr & ICR_RXSEQ) { 2732 DPRINTF(WM_DEBUG_LINK, 2733 ("%s: LINK Receive sequence error\n", 2734 sc->sc_dev.dv_xname)); 2735 } 2736 return; 2737 } 2738 2739 /* 2740 * If we are now receiving /C/, check for link again in 2741 * a couple of link clock ticks. 2742 */ 2743 if (icr & ICR_RXCFG) { 2744 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n", 2745 sc->sc_dev.dv_xname)); 2746 sc->sc_tbi_anstate = 2; 2747 } 2748 2749 if (icr & ICR_LSC) { 2750 status = CSR_READ(sc, WMREG_STATUS); 2751 if (status & STATUS_LU) { 2752 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 2753 sc->sc_dev.dv_xname, 2754 (status & STATUS_FD) ? "FDX" : "HDX")); 2755 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 2756 sc->sc_fcrtl &= ~FCRTL_XONE; 2757 if (status & STATUS_FD) 2758 sc->sc_tctl |= 2759 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 2760 else 2761 sc->sc_tctl |= 2762 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 2763 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 2764 sc->sc_fcrtl |= FCRTL_XONE; 2765 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 2766 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 2767 WMREG_OLD_FCRTL : WMREG_FCRTL, 2768 sc->sc_fcrtl); 2769 sc->sc_tbi_linkup = 1; 2770 } else { 2771 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 2772 sc->sc_dev.dv_xname)); 2773 sc->sc_tbi_linkup = 0; 2774 } 2775 sc->sc_tbi_anstate = 2; 2776 wm_tbi_set_linkled(sc); 2777 } else if (icr & ICR_RXSEQ) { 2778 DPRINTF(WM_DEBUG_LINK, 2779 ("%s: LINK: Receive sequence error\n", 2780 sc->sc_dev.dv_xname)); 2781 } 2782 } 2783 2784 /* 2785 * wm_tick: 2786 * 2787 * One second timer, used to check link status, sweep up 2788 * completed transmit jobs, etc. 2789 */ 2790 static void 2791 wm_tick(void *arg) 2792 { 2793 struct wm_softc *sc = arg; 2794 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2795 int s; 2796 2797 s = splnet(); 2798 2799 if (sc->sc_type >= WM_T_82542_2_1) { 2800 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 2801 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 2802 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 2803 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 2804 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 2805 } 2806 2807 ifp->if_collisions += CSR_READ(sc, WMREG_COLC); 2808 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC); 2809 2810 2811 if (sc->sc_flags & WM_F_HAS_MII) 2812 mii_tick(&sc->sc_mii); 2813 else 2814 wm_tbi_check_link(sc); 2815 2816 splx(s); 2817 2818 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); 2819 } 2820 2821 /* 2822 * wm_reset: 2823 * 2824 * Reset the i82542 chip. 2825 */ 2826 static void 2827 wm_reset(struct wm_softc *sc) 2828 { 2829 uint32_t reg; 2830 2831 /* 2832 * Allocate on-chip memory according to the MTU size. 
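	 * The chip carves a shared on-chip packet buffer into receive
	 * and transmit portions: the value written to the PBA register
	 * is the receive share, and the remainder serves as the Tx
	 * FIFO.  On the 82547, for example, the buffer is 40K total,
	 * so the PBA_30K setting used for a normal MTU leaves
	 * 40K - 30K = 10K of Tx FIFO.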
 * The Packet Buffer Allocation register must be written
 * before the chip is reset.
 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
		sc->sc_pba = PBA_10K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS) &
			    STATUS_GIO_M_ENA) == 0)
				break;
			delay(100);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about the PHY_RST bit in the CTRL
	 * register in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	switch (sc->sc_type) {
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82541:
	case WM_T_82541_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headaches that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;

	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;

	case WM_T_ICH8:
	case WM_T_ICH9:
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
		delay(10000);
		/*
		 * XXX FALLTHROUGH: the documented-method reset below is
		 * issued a second time for these chips.
		 */

	default:
		/*
		 * Everything else can safely use the documented method.
		 */
		CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	}
	delay(10000);

	/* Reload the EEPROM. */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		break;
	case WM_T_82573:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* FALLTHROUGH */
	default:
		/* Wait for EECD_EE_AUTORD to be set. */
		wm_get_auto_rd_done(sc);
	}

#if 0
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
			return;
		}
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		log(LOG_ERR, "%s: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
#endif
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Update statistics before the reset. */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/*
	 * Initialize the transmit descriptor ring.
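	 * All descriptors are zeroed and both TDH and TDT are reset
	 * to 0 below; the chip treats head == tail as an empty ring
	 * and only processes descriptors from the head up to (but not
	 * including) the tail that wm_start() advances.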
*/ 3026 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc)); 3027 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc), 3028 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 3029 sc->sc_txfree = WM_NTXDESC(sc); 3030 sc->sc_txnext = 0; 3031 3032 if (sc->sc_type < WM_T_82543) { 3033 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0)); 3034 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0)); 3035 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc)); 3036 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 3037 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 3038 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 3039 } else { 3040 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0)); 3041 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0)); 3042 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc)); 3043 CSR_WRITE(sc, WMREG_TDH, 0); 3044 CSR_WRITE(sc, WMREG_TDT, 0); 3045 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */ 3046 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */ 3047 3048 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) | 3049 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 3050 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) | 3051 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 3052 } 3053 CSR_WRITE(sc, WMREG_TQSA_LO, 0); 3054 CSR_WRITE(sc, WMREG_TQSA_HI, 0); 3055 3056 /* Initialize the transmit job descriptors. */ 3057 for (i = 0; i < WM_TXQUEUELEN(sc); i++) 3058 sc->sc_txsoft[i].txs_mbuf = NULL; 3059 sc->sc_txsfree = WM_TXQUEUELEN(sc); 3060 sc->sc_txsnext = 0; 3061 sc->sc_txsdirty = 0; 3062 3063 /* 3064 * Initialize the receive descriptor and receive job 3065 * descriptor rings. 3066 */ 3067 if (sc->sc_type < WM_T_82543) { 3068 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0)); 3069 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0)); 3070 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs)); 3071 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 3072 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 3073 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 3074 3075 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 3076 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 3077 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 3078 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 3079 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 3080 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 3081 } else { 3082 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0)); 3083 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0)); 3084 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs)); 3085 CSR_WRITE(sc, WMREG_RDH, 0); 3086 CSR_WRITE(sc, WMREG_RDT, 0); 3087 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */ 3088 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */ 3089 } 3090 for (i = 0; i < WM_NRXDESC; i++) { 3091 rxs = &sc->sc_rxsoft[i]; 3092 if (rxs->rxs_mbuf == NULL) { 3093 if ((error = wm_add_rxbuf(sc, i)) != 0) { 3094 log(LOG_ERR, "%s: unable to allocate or map rx " 3095 "buffer %d, error = %d\n", 3096 sc->sc_dev.dv_xname, i, error); 3097 /* 3098 * XXX Should attempt to run with fewer receive 3099 * XXX buffers instead of just failing. 3100 */ 3101 wm_rxdrain(sc); 3102 goto out; 3103 } 3104 } else 3105 WM_INIT_RXDESC(sc, i); 3106 } 3107 sc->sc_rxptr = 0; 3108 sc->sc_rxdiscard = 0; 3109 WM_RXCHAIN_RESET(sc); 3110 3111 /* 3112 * Clear out the VLAN table -- we don't use it (yet). 3113 */ 3114 CSR_WRITE(sc, WMREG_VET, 0); 3115 for (i = 0; i < WM_VLAN_TABSIZE; i++) 3116 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 3117 3118 /* 3119 * Set up flow-control parameters. 3120 * 3121 * XXX Values could probably stand some tuning. 
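	 * For reference: FCAL/FCAH and FCT give the MAC the
	 * destination address and Ethertype to match for incoming
	 * PAUSE frames, FCRTH and FCRTL are the receive-buffer high
	 * and low water marks that trigger XOFF and XON transmission
	 * respectively, and FCTTV is the pause time value sent in our
	 * own XOFF frames.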
3122 */ 3123 if (sc->sc_type != WM_T_ICH8) { 3124 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 3125 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 3126 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 3127 } 3128 3129 sc->sc_fcrtl = FCRTL_DFLT; 3130 if (sc->sc_type < WM_T_82543) { 3131 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 3132 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 3133 } else { 3134 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 3135 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 3136 } 3137 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 3138 3139 #if 0 /* XXXJRT */ 3140 /* Deal with VLAN enables. */ 3141 if (VLAN_ATTACHED(&sc->sc_ethercom)) 3142 sc->sc_ctrl |= CTRL_VME; 3143 else 3144 #endif /* XXXJRT */ 3145 sc->sc_ctrl &= ~CTRL_VME; 3146 3147 /* Write the control registers. */ 3148 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 3149 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) { 3150 int val; 3151 val = CSR_READ(sc, WMREG_CTRL_EXT); 3152 val &= ~CTRL_EXT_LINK_MODE_MASK; 3153 CSR_WRITE(sc, WMREG_CTRL_EXT, val); 3154 3155 /* Bypass RX and TX FIFO's */ 3156 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 3157 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS | 3158 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 3159 3160 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 3161 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 3162 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 3163 /* 3164 * Set the mac to wait the maximum time between each 3165 * iteration and increase the max iterations when 3166 * polling the phy; this fixes erroneous timeouts at 10Mbps. 3167 */ 3168 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF); 3169 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM); 3170 val |= 0x3F; 3171 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val); 3172 } 3173 #if 0 3174 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 3175 #endif 3176 3177 /* 3178 * Set up checksum offload parameters. 3179 */ 3180 reg = CSR_READ(sc, WMREG_RXCSUM); 3181 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 3182 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 3183 reg |= RXCSUM_IPOFL; 3184 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 3185 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 3186 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 3187 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 3188 CSR_WRITE(sc, WMREG_RXCSUM, reg); 3189 3190 /* 3191 * Set up the interrupt registers. 3192 */ 3193 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3194 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 3195 ICR_RXO | ICR_RXT0; 3196 if ((sc->sc_flags & WM_F_HAS_MII) == 0) 3197 sc->sc_icr |= ICR_RXCFG; 3198 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 3199 3200 /* Set up the inter-packet gap. */ 3201 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 3202 3203 if (sc->sc_type >= WM_T_82543) { 3204 /* 3205 * Set up the interrupt throttling register (units of 256ns) 3206 * Note that a footnote in Intel's documentation says this 3207 * ticker runs at 1/4 the rate when the chip is in 100Mbit 3208 * or 10Mbit mode. Empirically, it appears to be the case 3209 * that that is also true for the 1024ns units of the other 3210 * interrupt-related timer registers -- so, really, we ought 3211 * to divide this value by 4 when the link speed is low. 3212 * 3213 * XXX implement this division at link speed change! 3214 */ 3215 3216 /* 3217 * For N interrupts/sec, set this value to: 3218 * 1000000000 / (N * 256). Note that we set the 3219 * absolute and packet timer values to this value 3220 * divided by 4 to get "simple timer" behavior. 
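		 * Worked through for the value used below: sc_itr = 1500
		 * gives 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
		 * and 1500 / 4 = 375 is exactly the value programmed into
		 * TIDV/TADV and RDTR/RADV earlier in this function.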
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	if (sc->sc_type >= WM_T_80003)
		sc->sc_tctl |= TCTL_RTLC;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* The 82573 doesn't support jumbo frames. */
	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_rctl |= RCTL_LPE;

	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    sc->sc_dev.dv_xname);
	return (error);
}

/*
 * wm_rxdrain:
 *
 *	Drain the receive queue.
 */
static void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes.
*/ 3350 CSR_WRITE(sc, WMREG_TCTL, 0); 3351 CSR_WRITE(sc, WMREG_RCTL, 0); 3352 3353 /* 3354 * Clear the interrupt mask to ensure the device cannot assert its 3355 * interrupt line. 3356 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service 3357 * any currently pending or shared interrupt. 3358 */ 3359 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 3360 sc->sc_icr = 0; 3361 3362 /* Release any queued transmit buffers. */ 3363 for (i = 0; i < WM_TXQUEUELEN(sc); i++) { 3364 txs = &sc->sc_txsoft[i]; 3365 if (txs->txs_mbuf != NULL) { 3366 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 3367 m_freem(txs->txs_mbuf); 3368 txs->txs_mbuf = NULL; 3369 } 3370 } 3371 3372 if (disable) 3373 wm_rxdrain(sc); 3374 3375 /* Mark the interface as down and cancel the watchdog timer. */ 3376 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3377 ifp->if_timer = 0; 3378 } 3379 3380 void 3381 wm_get_auto_rd_done(struct wm_softc *sc) 3382 { 3383 int i; 3384 3385 /* wait for eeprom to reload */ 3386 switch (sc->sc_type) { 3387 case WM_T_82571: 3388 case WM_T_82572: 3389 case WM_T_82573: 3390 case WM_T_80003: 3391 case WM_T_ICH8: 3392 case WM_T_ICH9: 3393 for (i = 10; i > 0; i--) { 3394 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 3395 break; 3396 delay(1000); 3397 } 3398 if (i == 0) { 3399 log(LOG_ERR, "%s: auto read from eeprom failed to " 3400 "complete\n", sc->sc_dev.dv_xname); 3401 } 3402 break; 3403 default: 3404 delay(5000); 3405 break; 3406 } 3407 3408 /* Phy configuration starts after EECD_AUTO_RD is set */ 3409 if (sc->sc_type == WM_T_82573) 3410 delay(25000); 3411 } 3412 3413 /* 3414 * wm_acquire_eeprom: 3415 * 3416 * Perform the EEPROM handshake required on some chips. 3417 */ 3418 static int 3419 wm_acquire_eeprom(struct wm_softc *sc) 3420 { 3421 uint32_t reg; 3422 int x; 3423 int ret = 0; 3424 3425 /* always success */ 3426 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3427 return 0; 3428 3429 if (sc->sc_flags & WM_F_SWFWHW_SYNC) { 3430 ret = wm_get_swfwhw_semaphore(sc); 3431 } else if (sc->sc_flags & WM_F_SWFW_SYNC) { 3432 /* this will also do wm_get_swsm_semaphore() if needed */ 3433 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM); 3434 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) { 3435 ret = wm_get_swsm_semaphore(sc); 3436 } 3437 3438 if (ret) 3439 return 1; 3440 3441 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3442 reg = CSR_READ(sc, WMREG_EECD); 3443 3444 /* Request EEPROM access. */ 3445 reg |= EECD_EE_REQ; 3446 CSR_WRITE(sc, WMREG_EECD, reg); 3447 3448 /* ..and wait for it to be granted. */ 3449 for (x = 0; x < 1000; x++) { 3450 reg = CSR_READ(sc, WMREG_EECD); 3451 if (reg & EECD_EE_GNT) 3452 break; 3453 delay(5); 3454 } 3455 if ((reg & EECD_EE_GNT) == 0) { 3456 aprint_error("%s: could not acquire EEPROM GNT\n", 3457 sc->sc_dev.dv_xname); 3458 reg &= ~EECD_EE_REQ; 3459 CSR_WRITE(sc, WMREG_EECD, reg); 3460 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 3461 wm_put_swfwhw_semaphore(sc); 3462 if (sc->sc_flags & WM_F_SWFW_SYNC) 3463 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3464 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3465 wm_put_swsm_semaphore(sc); 3466 return (1); 3467 } 3468 } 3469 3470 return (0); 3471 } 3472 3473 /* 3474 * wm_release_eeprom: 3475 * 3476 * Release the EEPROM mutex. 
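 *
 *	The release mirrors wm_acquire_eeprom() in reverse: drop the
 *	EECD_EE_REQ handshake bit first, then give back whichever
 *	software/firmware semaphore was taken.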
3477 */ 3478 static void 3479 wm_release_eeprom(struct wm_softc *sc) 3480 { 3481 uint32_t reg; 3482 3483 /* always success */ 3484 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0) 3485 return; 3486 3487 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) { 3488 reg = CSR_READ(sc, WMREG_EECD); 3489 reg &= ~EECD_EE_REQ; 3490 CSR_WRITE(sc, WMREG_EECD, reg); 3491 } 3492 3493 if (sc->sc_flags & WM_F_SWFWHW_SYNC) 3494 wm_put_swfwhw_semaphore(sc); 3495 if (sc->sc_flags & WM_F_SWFW_SYNC) 3496 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 3497 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) 3498 wm_put_swsm_semaphore(sc); 3499 } 3500 3501 /* 3502 * wm_eeprom_sendbits: 3503 * 3504 * Send a series of bits to the EEPROM. 3505 */ 3506 static void 3507 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 3508 { 3509 uint32_t reg; 3510 int x; 3511 3512 reg = CSR_READ(sc, WMREG_EECD); 3513 3514 for (x = nbits; x > 0; x--) { 3515 if (bits & (1U << (x - 1))) 3516 reg |= EECD_DI; 3517 else 3518 reg &= ~EECD_DI; 3519 CSR_WRITE(sc, WMREG_EECD, reg); 3520 delay(2); 3521 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3522 delay(2); 3523 CSR_WRITE(sc, WMREG_EECD, reg); 3524 delay(2); 3525 } 3526 } 3527 3528 /* 3529 * wm_eeprom_recvbits: 3530 * 3531 * Receive a series of bits from the EEPROM. 3532 */ 3533 static void 3534 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 3535 { 3536 uint32_t reg, val; 3537 int x; 3538 3539 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 3540 3541 val = 0; 3542 for (x = nbits; x > 0; x--) { 3543 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 3544 delay(2); 3545 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 3546 val |= (1U << (x - 1)); 3547 CSR_WRITE(sc, WMREG_EECD, reg); 3548 delay(2); 3549 } 3550 *valp = val; 3551 } 3552 3553 /* 3554 * wm_read_eeprom_uwire: 3555 * 3556 * Read a word from the EEPROM using the MicroWire protocol. 3557 */ 3558 static int 3559 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3560 { 3561 uint32_t reg, val; 3562 int i; 3563 3564 for (i = 0; i < wordcnt; i++) { 3565 /* Clear SK and DI. */ 3566 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 3567 CSR_WRITE(sc, WMREG_EECD, reg); 3568 3569 /* Set CHIP SELECT. */ 3570 reg |= EECD_CS; 3571 CSR_WRITE(sc, WMREG_EECD, reg); 3572 delay(2); 3573 3574 /* Shift in the READ command. */ 3575 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 3576 3577 /* Shift in address. */ 3578 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits); 3579 3580 /* Shift out the data. */ 3581 wm_eeprom_recvbits(sc, &val, 16); 3582 data[i] = val & 0xffff; 3583 3584 /* Clear CHIP SELECT. */ 3585 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 3586 CSR_WRITE(sc, WMREG_EECD, reg); 3587 delay(2); 3588 } 3589 3590 return (0); 3591 } 3592 3593 /* 3594 * wm_spi_eeprom_ready: 3595 * 3596 * Wait for a SPI EEPROM to be ready for commands. 3597 */ 3598 static int 3599 wm_spi_eeprom_ready(struct wm_softc *sc) 3600 { 3601 uint32_t val; 3602 int usec; 3603 3604 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { 3605 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); 3606 wm_eeprom_recvbits(sc, &val, 8); 3607 if ((val & SPI_SR_RDY) == 0) 3608 break; 3609 } 3610 if (usec >= SPI_MAX_RETRIES) { 3611 aprint_error("%s: EEPROM failed to become ready\n", 3612 sc->sc_dev.dv_xname); 3613 return (1); 3614 } 3615 return (0); 3616 } 3617 3618 /* 3619 * wm_read_eeprom_spi: 3620 * 3621 * Read a work from the EEPROM using the SPI protocol. 
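 *
 *	The sequence below: wait for the part to report ready (RDSR),
 *	toggle chip select to flush any partial command, shift out a
 *	READ opcode (folding in the A8 address bit on 8-bit-address
 *	parts for words at 128 and above), shift out the byte address,
 *	then clock in the data, swapping each 16-bit word into host
 *	order.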
3622 */ 3623 static int 3624 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3625 { 3626 uint32_t reg, val; 3627 int i; 3628 uint8_t opc; 3629 3630 /* Clear SK and CS. */ 3631 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); 3632 CSR_WRITE(sc, WMREG_EECD, reg); 3633 delay(2); 3634 3635 if (wm_spi_eeprom_ready(sc)) 3636 return (1); 3637 3638 /* Toggle CS to flush commands. */ 3639 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); 3640 delay(2); 3641 CSR_WRITE(sc, WMREG_EECD, reg); 3642 delay(2); 3643 3644 opc = SPI_OPC_READ; 3645 if (sc->sc_ee_addrbits == 8 && word >= 128) 3646 opc |= SPI_OPC_A8; 3647 3648 wm_eeprom_sendbits(sc, opc, 8); 3649 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits); 3650 3651 for (i = 0; i < wordcnt; i++) { 3652 wm_eeprom_recvbits(sc, &val, 16); 3653 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); 3654 } 3655 3656 /* Raise CS and clear SK. */ 3657 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 3658 CSR_WRITE(sc, WMREG_EECD, reg); 3659 delay(2); 3660 3661 return (0); 3662 } 3663 3664 #define EEPROM_CHECKSUM 0xBABA 3665 #define EEPROM_SIZE 0x0040 3666 3667 /* 3668 * wm_validate_eeprom_checksum 3669 * 3670 * The checksum is defined as the sum of the first 64 (16 bit) words. 3671 */ 3672 static int 3673 wm_validate_eeprom_checksum(struct wm_softc *sc) 3674 { 3675 uint16_t checksum; 3676 uint16_t eeprom_data; 3677 int i; 3678 3679 checksum = 0; 3680 3681 for (i = 0; i < EEPROM_SIZE; i++) { 3682 if (wm_read_eeprom(sc, i, 1, &eeprom_data)) 3683 return 1; 3684 checksum += eeprom_data; 3685 } 3686 3687 if (checksum != (uint16_t) EEPROM_CHECKSUM) 3688 return 1; 3689 3690 return 0; 3691 } 3692 3693 /* 3694 * wm_read_eeprom: 3695 * 3696 * Read data from the serial EEPROM. 3697 */ 3698 static int 3699 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 3700 { 3701 int rv; 3702 3703 if (sc->sc_flags & WM_F_EEPROM_INVALID) 3704 return 1; 3705 3706 if (wm_acquire_eeprom(sc)) 3707 return 1; 3708 3709 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) 3710 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data); 3711 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR) 3712 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data); 3713 else if (sc->sc_flags & WM_F_EEPROM_SPI) 3714 rv = wm_read_eeprom_spi(sc, word, wordcnt, data); 3715 else 3716 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data); 3717 3718 wm_release_eeprom(sc); 3719 return rv; 3720 } 3721 3722 static int 3723 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt, 3724 uint16_t *data) 3725 { 3726 int i, eerd = 0; 3727 int error = 0; 3728 3729 for (i = 0; i < wordcnt; i++) { 3730 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 3731 3732 CSR_WRITE(sc, WMREG_EERD, eerd); 3733 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 3734 if (error != 0) 3735 break; 3736 3737 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 3738 } 3739 3740 return error; 3741 } 3742 3743 static int 3744 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 3745 { 3746 uint32_t attempts = 100000; 3747 uint32_t i, reg = 0; 3748 int32_t done = -1; 3749 3750 for (i = 0; i < attempts; i++) { 3751 reg = CSR_READ(sc, rw); 3752 3753 if (reg & EERD_DONE) { 3754 done = 0; 3755 break; 3756 } 3757 delay(5); 3758 } 3759 3760 return done; 3761 } 3762 3763 /* 3764 * wm_add_rxbuf: 3765 * 3766 * Add a receive buffer to the indiciated descriptor. 
3767 */ 3768 static int 3769 wm_add_rxbuf(struct wm_softc *sc, int idx) 3770 { 3771 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx]; 3772 struct mbuf *m; 3773 int error; 3774 3775 MGETHDR(m, M_DONTWAIT, MT_DATA); 3776 if (m == NULL) 3777 return (ENOBUFS); 3778 3779 MCLGET(m, M_DONTWAIT); 3780 if ((m->m_flags & M_EXT) == 0) { 3781 m_freem(m); 3782 return (ENOBUFS); 3783 } 3784 3785 if (rxs->rxs_mbuf != NULL) 3786 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 3787 3788 rxs->rxs_mbuf = m; 3789 3790 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3791 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, 3792 BUS_DMA_READ|BUS_DMA_NOWAIT); 3793 if (error) { 3794 /* XXX XXX XXX */ 3795 printf("%s: unable to load rx DMA map %d, error = %d\n", 3796 sc->sc_dev.dv_xname, idx, error); 3797 panic("wm_add_rxbuf"); 3798 } 3799 3800 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 3801 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 3802 3803 WM_INIT_RXDESC(sc, idx); 3804 3805 return (0); 3806 } 3807 3808 /* 3809 * wm_set_ral: 3810 * 3811 * Set an entery in the receive address list. 3812 */ 3813 static void 3814 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3815 { 3816 uint32_t ral_lo, ral_hi; 3817 3818 if (enaddr != NULL) { 3819 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 3820 (enaddr[3] << 24); 3821 ral_hi = enaddr[4] | (enaddr[5] << 8); 3822 ral_hi |= RAL_AV; 3823 } else { 3824 ral_lo = 0; 3825 ral_hi = 0; 3826 } 3827 3828 if (sc->sc_type >= WM_T_82544) { 3829 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx), 3830 ral_lo); 3831 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx), 3832 ral_hi); 3833 } else { 3834 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo); 3835 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi); 3836 } 3837 } 3838 3839 /* 3840 * wm_mchash: 3841 * 3842 * Compute the hash of the multicast address for the 4096-bit 3843 * multicast filter. 3844 */ 3845 static uint32_t 3846 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3847 { 3848 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3849 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3850 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 3851 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 3852 uint32_t hash; 3853 3854 if (sc->sc_type == WM_T_ICH8) { 3855 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 3856 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 3857 return (hash & 0x3ff); 3858 } 3859 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3860 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3861 3862 return (hash & 0xfff); 3863 } 3864 3865 /* 3866 * wm_set_filter: 3867 * 3868 * Set up the receive filter. 3869 */ 3870 static void 3871 wm_set_filter(struct wm_softc *sc) 3872 { 3873 struct ethercom *ec = &sc->sc_ethercom; 3874 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3875 struct ether_multi *enm; 3876 struct ether_multistep step; 3877 bus_addr_t mta_reg; 3878 uint32_t hash, reg, bit; 3879 int i, size; 3880 3881 if (sc->sc_type >= WM_T_82544) 3882 mta_reg = WMREG_CORDOVA_MTA; 3883 else 3884 mta_reg = WMREG_MTA; 3885 3886 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3887 3888 if (ifp->if_flags & IFF_BROADCAST) 3889 sc->sc_rctl |= RCTL_BAM; 3890 if (ifp->if_flags & IFF_PROMISC) { 3891 sc->sc_rctl |= RCTL_UPE; 3892 goto allmulti; 3893 } 3894 3895 /* 3896 * Set the station address in the first RAL slot, and 3897 * clear the remaining slots. 
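	 * Each RAL slot is a register pair: RAL_LO holds the first
	 * four bytes of the Ethernet address and RAL_HI the remaining
	 * two plus the Address Valid bit (see wm_set_ral() above).
	 * Slot 0 always holds our own unicast address.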
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if (sc->sc_type == WM_T_ICH8)
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* The upper hash bits select an MTA register... */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		/* ...and the low 5 bits a bit within it. */
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug?  On the 82544, a write to an
		 * odd-numbered MTA register must be followed by a
		 * rewrite of the previous (even) register.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}

/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
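/*
 * In the media list built above, the ifmedia "data" word carries the
 * ANAR_X ability bits to advertise in TXCW; wm_tbi_mediachange() below
 * copies it into sc_txcw verbatim before deciding whether to enable
 * autonegotiation.
 */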
/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
static void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t ctrl;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->sc_tbi_linkup == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
	ctrl = CSR_READ(sc, WMREG_CTRL);
	if (ctrl & CTRL_RFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
	if (ctrl & CTRL_TFCE)
		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
}

/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = ife->ifm_data;
	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x on entry\n",
	    sc->sc_dev.dv_xname, sc->sc_txcw));
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and
		 * turn on full duplex.
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
	    sc->sc_dev.dv_xname, sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* NOTE: CTRL will update TFCE and RFCE automatically. */

	sc->sc_tbi_anstate = 0;

	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", sc->sc_dev.dv_xname, i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do
			 * its thing.
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
		    sc->sc_dev.dv_xname, i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
		    sc->sc_dev.dv_xname, status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
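/*
 * The TCTL collision-distance update above mirrors the one in
 * wm_tbi_check_link() below: the 10-bit COLD field is cleared and then
 * reloaded with the full- or half-duplex distance matching the duplex
 * the link actually came up in.
 */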
"FDX" : "HDX")); 4119 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 4120 sc->sc_fcrtl &= ~FCRTL_XONE; 4121 if (status & STATUS_FD) 4122 sc->sc_tctl |= 4123 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4124 else 4125 sc->sc_tctl |= 4126 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 4127 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 4128 sc->sc_fcrtl |= FCRTL_XONE; 4129 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 4130 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 4131 WMREG_OLD_FCRTL : WMREG_FCRTL, 4132 sc->sc_fcrtl); 4133 sc->sc_tbi_linkup = 1; 4134 } else { 4135 /* Link is down. */ 4136 DPRINTF(WM_DEBUG_LINK, 4137 ("%s: LINK: set media -> link down\n", 4138 sc->sc_dev.dv_xname)); 4139 sc->sc_tbi_linkup = 0; 4140 } 4141 } else { 4142 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 4143 sc->sc_dev.dv_xname)); 4144 sc->sc_tbi_linkup = 0; 4145 } 4146 4147 wm_tbi_set_linkled(sc); 4148 4149 return (0); 4150 } 4151 4152 /* 4153 * wm_tbi_set_linkled: 4154 * 4155 * Update the link LED on 1000BASE-X devices. 4156 */ 4157 static void 4158 wm_tbi_set_linkled(struct wm_softc *sc) 4159 { 4160 4161 if (sc->sc_tbi_linkup) 4162 sc->sc_ctrl |= CTRL_SWDPIN(0); 4163 else 4164 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 4165 4166 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4167 } 4168 4169 /* 4170 * wm_tbi_check_link: 4171 * 4172 * Check the link on 1000BASE-X devices. 4173 */ 4174 static void 4175 wm_tbi_check_link(struct wm_softc *sc) 4176 { 4177 uint32_t rxcw, ctrl, status; 4178 4179 if (sc->sc_tbi_anstate == 0) 4180 return; 4181 else if (sc->sc_tbi_anstate > 1) { 4182 DPRINTF(WM_DEBUG_LINK, 4183 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname, 4184 sc->sc_tbi_anstate)); 4185 sc->sc_tbi_anstate--; 4186 return; 4187 } 4188 4189 sc->sc_tbi_anstate = 0; 4190 4191 rxcw = CSR_READ(sc, WMREG_RXCW); 4192 ctrl = CSR_READ(sc, WMREG_CTRL); 4193 status = CSR_READ(sc, WMREG_STATUS); 4194 4195 if ((status & STATUS_LU) == 0) { 4196 DPRINTF(WM_DEBUG_LINK, 4197 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname)); 4198 sc->sc_tbi_linkup = 0; 4199 } else { 4200 DPRINTF(WM_DEBUG_LINK, 4201 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname, 4202 (status & STATUS_FD) ? "FDX" : "HDX")); 4203 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 4204 sc->sc_fcrtl &= ~FCRTL_XONE; 4205 if (status & STATUS_FD) 4206 sc->sc_tctl |= 4207 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 4208 else 4209 sc->sc_tctl |= 4210 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 4211 if (ctrl & CTRL_TFCE) 4212 sc->sc_fcrtl |= FCRTL_XONE; 4213 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 4214 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 4215 WMREG_OLD_FCRTL : WMREG_FCRTL, 4216 sc->sc_fcrtl); 4217 sc->sc_tbi_linkup = 1; 4218 } 4219 4220 wm_tbi_set_linkled(sc); 4221 } 4222 4223 /* 4224 * wm_gmii_reset: 4225 * 4226 * Reset the PHY. 4227 */ 4228 static void 4229 wm_gmii_reset(struct wm_softc *sc) 4230 { 4231 uint32_t reg; 4232 int func = 0; /* XXX gcc */ 4233 4234 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) { 4235 if (wm_get_swfwhw_semaphore(sc)) 4236 return; 4237 } 4238 if (sc->sc_type == WM_T_80003) { 4239 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1; 4240 if (wm_get_swfw_semaphore(sc, 4241 func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) 4242 return; 4243 } 4244 if (sc->sc_type >= WM_T_82544) { 4245 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 4246 delay(20000); 4247 4248 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4249 delay(20000); 4250 } else { 4251 /* 4252 * With 82543, we need to force speed and duplex on the MAC 4253 * equal to what the PHY speed and duplex configuration is. 
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		if (wm_get_swfwhw_semaphore(sc))
			return;
	}
	if (sc->sc_type == WM_T_80003) {
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		if (wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
			return;
	}
	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the
		 * PHY to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_type == WM_T_80003)
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type >= WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
	    sc->sc_flowflags;
}
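/*
 * wm_gmii_mediachange() below only forces speed/duplex on the MAC for
 * pre-82544 chips with non-auto media: e.g. a user-selected
 * "media 100baseTX mediaopt full-duplex" on an 82543 sets CTRL_FRCSPD,
 * CTRL_FRCFDX, CTRL_SPEED_100 and CTRL_FD before resetting the PHY.
 */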
/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}

#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}
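/*
 * Both i82543 MDIO frames begin with a 32-bit preamble of ones sent via
 * i82543_mii_sendbits().  The read frame above is 14 bits (2-bit start,
 * 2-bit opcode, 5-bit PHY address, 5-bit register address); the
 * turnaround and the 16 data bits are then clocked in by
 * i82543_mii_recvbits().
 */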
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on Kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
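/*
 * GG82563 PHY registers are paged: the low bits of 'reg' select the
 * register within a page and the high bits the page number, which is
 * written to PAGE_SELECT (or to the alternate select register for
 * registers at or above GG82563_MIN_ALT_REG) before the access proper.
 * The write path below follows the same pattern.
 */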
/*
 * wm_gmii_i80003_writereg:	[mii interface function]
 *
 *	Write a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on Kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type >= WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
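/*
 * Kumeran registers on the 80003 are reached through the single
 * KUMCTRLSTA window: the register offset goes into the OFFSET field,
 * and KUMCTRLSTA_REN marks the access as a read; writes carry the
 * data in the low bits instead.
 */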
/*
 * wm_kmrn_i80003_readreg:
 *
 *	Read a Kumeran register.
 */
static int
wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);

	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_kmrn_i80003_writereg:
 *
 *	Write a Kumeran register.
 */
static void
wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 & 16. */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, the device is Flash type. */
		if (eecd == 0x03) {
			return 0;
		}
	}
	return 1;
}

static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1; /* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error("%s: could not acquire SWSM semaphore\n",
		    sc->sc_dev.dv_xname);
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}

static void
wm_put_swsm_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}

static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
			if (wm_get_swsm_semaphore(sc))
				return 1;
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return 0;
		}
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
			wm_put_swsm_semaphore(sc);
		delay(5000);
	}
	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    sc->sc_dev.dv_xname, mask, swfw_sync);
	return 1;
}

static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
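/*
 * Note the two-level locking above: on parts with the SWSM semaphore,
 * SWSM_SWESMBI guards the SW_FW_SYNC register itself, while the
 * per-resource bits in SW_FW_SYNC arbitrate between the driver and the
 * firmware.  ICH8/ICH9 parts use the EXTCNFCTR software flag below
 * instead.
 */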
static int
wm_get_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);

		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			return 0;
		delay(5000);
	}
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
	    sc->sc_dev.dv_xname, ext_ctrl);
	return 1;
}

static void
wm_put_swfwhw_semaphore(struct wm_softc *sc)
{
	uint32_t ext_ctrl;

	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
}

/******************************************************************************
 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
 * register.
 *
 * sc - Struct containing variables accessed by shared code
 * offset - offset of word in the EEPROM to read
 * data - word read from the EEPROM
 * words - number of words to read
 *****************************************************************************/
static int
wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
{
	int32_t error = 0;
	uint32_t flash_bank = 0;
	uint32_t act_offset = 0;
	uint32_t bank_offset = 0;
	uint16_t word = 0;
	uint16_t i = 0;

	/*
	 * We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
	/* The value of bit 22 corresponds to the flash bank we're on. */
	flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;

	/* Adjust the offset appropriately if we're on bank 1 (word size). */
	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);

	error = wm_get_swfwhw_semaphore(sc);
	if (error)
		return error;

	for (i = 0; i < words; i++) {
		/* The NVM part needs a byte offset, hence * 2. */
		act_offset = bank_offset + ((offset + i) * 2);
		error = wm_read_ich8_word(sc, act_offset, &word);
		if (error)
			break;
		data[i] = word;
	}

	wm_put_swfwhw_semaphore(sc);
	return error;
}
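/*
 * Worked example (illustrative sizes): with a flash bank of
 * sc_ich8_flash_bank_size == 0x800 words and EECD_SEC1VAL set, reading
 * EEPROM word 0x10 above accesses flash byte offset
 * 0x800 * 2 + 0x10 * 2 = 0x1020 within the NVM region.
 */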
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* Maybe check the Flash Descriptor Valid bit in Hw status. */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/*
	 * Clear FCERR and DAEL in Hw status; both bits are
	 * write-1-to-clear.
	 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against, in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which can then be used as an indication whether
	 * a cycle is in progress or has been completed.  We should also
	 * have some software semaphore mechanism to guard FDONE or the
	 * cycle-in-progress bit so that accesses to those bits from two
	 * threads are serialized, or some way to keep two threads from
	 * starting a cycle at the same time.
	 */
	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start
		 * a cycle.  Begin by setting Flash Cycle Done.
		 */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/*
			 * The previous cycle completed while we waited;
			 * now set the Flash Cycle Done.
			 */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}

/******************************************************************************
 * This function starts a flash cycle and waits for its completion.
 *
 * sc - The pointer to the hw structure
 ****************************************************************************/
static int32_t
wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
{
	uint16_t hsflctl;
	uint16_t hsfsts;
	int32_t error = 1;
	uint32_t i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control. */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= HSFCTL_GO;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

	/* Wait till the FDONE bit is set to 1. */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
		error = 0;
	}
	return error;
}
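/*
 * A complete read is therefore: wm_ich8_cycle_init() to make sure no
 * cycle is in flight and FCERR/DAEL are clear, program HSFCTL with the
 * byte count and ICH_CYCLE_READ, write the linear address to FADDR,
 * set HSFCTL_GO via wm_ich8_flash_cycle(), and finally pull the result
 * out of FDATA0; wm_read_ich8_data() below does exactly that, retrying
 * on FCERR.
 */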
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) &
		    HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/*
		 * Write the last 24 bits of index into the Flash Linear
		 * address field in Flash Address.
		 */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/*
		 * If FCERR is set to 1, clear it and try the whole sequence
		 * a few more times; otherwise read in (shift in) the Flash
		 * Data0, least significant byte first.
		 */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1) {
				*data = (uint8_t)(flash_data & 0x000000FF);
			} else if (size == 2) {
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			}
			break;
		} else {
			/*
			 * If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another
			 * try... ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0) {
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}

#if 0
/******************************************************************************
 * Reads a single byte from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
{
	int32_t status;
	uint16_t word = 0;

	status = wm_read_ich8_data(sc, index, 1, &word);
	if (status == 0) {
		*data = (uint8_t)word;
	}

	return status;
}
#endif

/******************************************************************************
 * Reads a word from the NVM using the ICH8 flash access registers.
 *
 * sc - pointer to wm_hw structure
 * index - The starting byte index of the word to read.
 * data - Pointer to a word to store the value read.
 *****************************************************************************/
static int32_t
wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
{
	int32_t status;

	status = wm_read_ich8_data(sc, index, 2, data);
	return status;
}
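/*
 * wm_read_ich8_word() takes a byte index, so callers convert word
 * offsets with '* 2', as wm_read_eeprom_ich8() above does; the value
 * returned is simply the low 16 bits of the FDATA0 register.
 */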