1 /* $NetBSD: if_wm.c,v 1.729 2022/02/26 15:04:39 rillig Exp $ */ 2 3 /* 4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /******************************************************************************* 39 40 Copyright (c) 2001-2005, Intel Corporation 41 All rights reserved. 42 43 Redistribution and use in source and binary forms, with or without 44 modification, are permitted provided that the following conditions are met: 45 46 1. Redistributions of source code must retain the above copyright notice, 47 this list of conditions and the following disclaimer. 48 49 2. Redistributions in binary form must reproduce the above copyright 50 notice, this list of conditions and the following disclaimer in the 51 documentation and/or other materials provided with the distribution. 52 53 3. Neither the name of the Intel Corporation nor the names of its 54 contributors may be used to endorse or promote products derived from 55 this software without specific prior written permission. 56 57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 60 ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 67 POSSIBILITY OF SUCH DAMAGE. 68 69 *******************************************************************************/ 70 /* 71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips. 72 * 73 * TODO (in order of importance): 74 * 75 * - Check XXX'ed comments 76 * - TX Multi queue improvement (refine queue selection logic) 77 * - Split header buffer for newer descriptors 78 * - EEE (Energy Efficiency Ethernet) for I354 79 * - Virtual Function 80 * - Set LED correctly (based on contents in EEPROM) 81 * - Rework how parameters are loaded from the EEPROM. 82 */ 83 84 #include <sys/cdefs.h> 85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.729 2022/02/26 15:04:39 rillig Exp $"); 86 87 #ifdef _KERNEL_OPT 88 #include "opt_net_mpsafe.h" 89 #include "opt_if_wm.h" 90 #endif 91 92 #include <sys/param.h> 93 #include <sys/systm.h> 94 #include <sys/callout.h> 95 #include <sys/mbuf.h> 96 #include <sys/malloc.h> 97 #include <sys/kmem.h> 98 #include <sys/kernel.h> 99 #include <sys/socket.h> 100 #include <sys/ioctl.h> 101 #include <sys/errno.h> 102 #include <sys/device.h> 103 #include <sys/queue.h> 104 #include <sys/syslog.h> 105 #include <sys/interrupt.h> 106 #include <sys/cpu.h> 107 #include <sys/pcq.h> 108 #include <sys/sysctl.h> 109 #include <sys/workqueue.h> 110 #include <sys/atomic.h> 111 112 #include <sys/rndsource.h> 113 114 #include <net/if.h> 115 #include <net/if_dl.h> 116 #include <net/if_media.h> 117 #include <net/if_ether.h> 118 119 #include <net/bpf.h> 120 121 #include <net/rss_config.h> 122 123 #include <netinet/in.h> /* XXX for struct ip */ 124 #include <netinet/in_systm.h> /* XXX for struct ip */ 125 #include <netinet/ip.h> /* XXX for struct ip */ 126 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */ 127 #include <netinet/tcp.h> /* XXX for struct tcphdr */ 128 129 #include <sys/bus.h> 130 #include <sys/intr.h> 131 #include <machine/endian.h> 132 133 #include <dev/mii/mii.h> 134 #include <dev/mii/mdio.h> 135 #include <dev/mii/miivar.h> 136 #include <dev/mii/miidevs.h> 137 #include <dev/mii/mii_bitbang.h> 138 #include <dev/mii/ikphyreg.h> 139 #include <dev/mii/igphyreg.h> 140 #include <dev/mii/igphyvar.h> 141 #include <dev/mii/inbmphyreg.h> 142 #include <dev/mii/ihphyreg.h> 143 #include <dev/mii/makphyreg.h> 144 145 #include <dev/pci/pcireg.h> 146 #include <dev/pci/pcivar.h> 147 #include <dev/pci/pcidevs.h> 148 149 #include <dev/pci/if_wmreg.h> 150 #include <dev/pci/if_wmvar.h> 151 152 #ifdef WM_DEBUG 153 #define WM_DEBUG_LINK __BIT(0) 154 #define WM_DEBUG_TX __BIT(1) 155 #define WM_DEBUG_RX __BIT(2) 156 #define WM_DEBUG_GMII __BIT(3) 157 #define WM_DEBUG_MANAGE __BIT(4) 158 #define WM_DEBUG_NVM __BIT(5) 159 #define WM_DEBUG_INIT __BIT(6) 160 #define WM_DEBUG_LOCK __BIT(7) 161 162 #if 0 163 #define WM_DEBUG_DEFAULT WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \ 164 WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | \ 165 WM_DEBUG_LOCK 166 #endif 167 168 #define DPRINTF(sc, x, y) \ 169 do { \ 170 if ((sc)->sc_debug & (x)) \ 171 printf y; \ 172 } while (0) 173 #else 174 
#define	DPRINTF(sc, x, y)	__nothing
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define	WM_MPSAFE	1
#define	WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
#define	WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
#define	WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define	WM_CALLOUT_FLAGS	0
#define	WM_SOFTINT_FLAGS	0
#define	WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define	WM_WORKQUEUE_PRI	PRI_SOFTNET

/*
 * The maximum number of interrupts that this device driver uses.
 */
#define	WM_MAX_NQUEUEINTR	16
#define	WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define	WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		(2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-sized
 * packets.  Jumbo packets consume 5 Rx buffers for a full-sized packet.
 * We allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
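/*
 * Illustrative sketch (not part of the original driver): because the ring
 * sizes above are powers of two, the WM_NEXTTX()/WM_NEXTRX()/WM_PREVRX()
 * macros wrap a ring index with a simple AND mask instead of a modulo.
 * With WM_NRXDESC == 256 (mask 0xff), for example:
 */
#if 0
	int idx = WM_NRXDESC - 1;	/* 255, the last descriptor */

	idx = WM_NEXTRX(idx);		/* (255 + 1) & 0xff == 0, wraps */
	idx = WM_PREVRX(idx);		/* (0 - 1) & 0xff == 255, wraps back */
#endif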
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map.  For packets which fill more than one buffer, we
 * chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
#if !defined(WM_EVENT_COUNTERS)
#define	WM_EVENT_COUNTERS	1
#endif
#endif

#ifdef WM_EVENT_COUNTERS
#define	WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define	WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define	WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define	WM_Q_EVCNT_DETACH(qname, evname, q, qnum)			\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
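/*
 * Illustrative sketch (not part of the original driver): the WM_Q_EVCNT_*
 * macros above paste the queue name and event name together.  For example,
 * WM_Q_EVCNT_DEFINE(txq, txdw) inside a struct expands to roughly:
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif
/*
 * and an attach such as WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, evtype)
 * formats the counter name as "txq00txdw" before handing it to
 * evcnt_attach_dynamic().  Note that "qname##XX##evname" is a plain string
 * literal (## does not operate inside strings), so the buffer is sized from
 * that literal; it happens to be long enough for the names generated here.
 */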

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE		0x1
#define	WM_TXQ_LINKDOWN_DISCARD	0x2

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	/* Checksum flags used for previous packet */
	uint32_t txq_last_hw_cmd;
	uint8_t txq_last_hw_fields;
	uint16_t txq_last_hw_ipcs;
	uint16_t txq_last_hw_tucs;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures.
*/ 448 int rxq_ndesc; /* must be a power of two */ 449 size_t rxq_descsize; /* a rx descriptor size */ 450 rxdescs_t *rxq_descs_u; 451 bus_dmamap_t rxq_desc_dmamap; /* control data DMA map */ 452 bus_dma_segment_t rxq_desc_seg; /* control data segment */ 453 int rxq_desc_rseg; /* real number of control segment */ 454 #define rxq_desc_dma rxq_desc_dmamap->dm_segs[0].ds_addr 455 #define rxq_descs rxq_descs_u->sctxu_rxdescs 456 #define rxq_ext_descs rxq_descs_u->sctxu_ext_rxdescs 457 #define rxq_nq_descs rxq_descs_u->sctxu_nq_rxdescs 458 459 bus_addr_t rxq_rdt_reg; /* offset of RDT register */ 460 461 int rxq_ptr; /* next ready Rx desc/queue ent */ 462 int rxq_discard; 463 int rxq_len; 464 struct mbuf *rxq_head; 465 struct mbuf *rxq_tail; 466 struct mbuf **rxq_tailp; 467 468 bool rxq_stopping; 469 470 uint32_t rxq_packets; /* for AIM */ 471 uint32_t rxq_bytes; /* for AIM */ 472 #ifdef WM_EVENT_COUNTERS 473 /* RX event counters */ 474 WM_Q_EVCNT_DEFINE(rxq, intr) /* Interrupts */ 475 WM_Q_EVCNT_DEFINE(rxq, defer) /* Rx deferred processing */ 476 477 WM_Q_EVCNT_DEFINE(rxq, ipsum) /* IP checksums checked */ 478 WM_Q_EVCNT_DEFINE(rxq, tusum) /* TCP/UDP cksums checked */ 479 #endif 480 }; 481 482 struct wm_queue { 483 int wmq_id; /* index of TX/RX queues */ 484 int wmq_intr_idx; /* index of MSI-X tables */ 485 486 uint32_t wmq_itr; /* interrupt interval per queue. */ 487 bool wmq_set_itr; 488 489 struct wm_txqueue wmq_txq; 490 struct wm_rxqueue wmq_rxq; 491 char sysctlname[32]; /* Name for sysctl */ 492 493 bool wmq_txrx_use_workqueue; 494 struct work wmq_cookie; 495 void *wmq_si; 496 }; 497 498 struct wm_phyop { 499 int (*acquire)(struct wm_softc *); 500 void (*release)(struct wm_softc *); 501 int (*readreg_locked)(device_t, int, int, uint16_t *); 502 int (*writereg_locked)(device_t, int, int, uint16_t); 503 int reset_delay_us; 504 bool no_errprint; 505 }; 506 507 struct wm_nvmop { 508 int (*acquire)(struct wm_softc *); 509 void (*release)(struct wm_softc *); 510 int (*read)(struct wm_softc *, int, int, uint16_t *); 511 }; 512 513 /* 514 * Software state per device. 
515 */ 516 struct wm_softc { 517 device_t sc_dev; /* generic device information */ 518 bus_space_tag_t sc_st; /* bus space tag */ 519 bus_space_handle_t sc_sh; /* bus space handle */ 520 bus_size_t sc_ss; /* bus space size */ 521 bus_space_tag_t sc_iot; /* I/O space tag */ 522 bus_space_handle_t sc_ioh; /* I/O space handle */ 523 bus_size_t sc_ios; /* I/O space size */ 524 bus_space_tag_t sc_flasht; /* flash registers space tag */ 525 bus_space_handle_t sc_flashh; /* flash registers space handle */ 526 bus_size_t sc_flashs; /* flash registers space size */ 527 off_t sc_flashreg_offset; /* 528 * offset to flash registers from 529 * start of BAR 530 */ 531 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 532 533 struct ethercom sc_ethercom; /* ethernet common data */ 534 struct mii_data sc_mii; /* MII/media information */ 535 536 pci_chipset_tag_t sc_pc; 537 pcitag_t sc_pcitag; 538 int sc_bus_speed; /* PCI/PCIX bus speed */ 539 int sc_pcixe_capoff; /* PCI[Xe] capability reg offset */ 540 541 uint16_t sc_pcidevid; /* PCI device ID */ 542 wm_chip_type sc_type; /* MAC type */ 543 int sc_rev; /* MAC revision */ 544 wm_phy_type sc_phytype; /* PHY type */ 545 uint8_t sc_sfptype; /* SFP type */ 546 uint32_t sc_mediatype; /* Media type (Copper, Fiber, SERDES)*/ 547 #define WM_MEDIATYPE_UNKNOWN 0x00 548 #define WM_MEDIATYPE_FIBER 0x01 549 #define WM_MEDIATYPE_COPPER 0x02 550 #define WM_MEDIATYPE_SERDES 0x03 /* Internal SERDES */ 551 int sc_funcid; /* unit number of the chip (0 to 3) */ 552 int sc_flags; /* flags; see below */ 553 u_short sc_if_flags; /* last if_flags */ 554 int sc_ec_capenable; /* last ec_capenable */ 555 int sc_flowflags; /* 802.3x flow control flags */ 556 uint16_t eee_lp_ability; /* EEE link partner's ability */ 557 int sc_align_tweak; 558 559 void *sc_ihs[WM_MAX_NINTR]; /* 560 * interrupt cookie. 561 * - legacy and msi use sc_ihs[0] only 562 * - msix use sc_ihs[0] to sc_ihs[nintrs-1] 563 */ 564 pci_intr_handle_t *sc_intrs; /* 565 * legacy and msi use sc_intrs[0] only 566 * msix use sc_intrs[0] to sc_ihs[nintrs-1] 567 */ 568 int sc_nintrs; /* number of interrupts */ 569 570 int sc_link_intr_idx; /* index of MSI-X tables */ 571 572 callout_t sc_tick_ch; /* tick callout */ 573 bool sc_core_stopping; 574 575 int sc_nvm_ver_major; 576 int sc_nvm_ver_minor; 577 int sc_nvm_ver_build; 578 int sc_nvm_addrbits; /* NVM address bits */ 579 unsigned int sc_nvm_wordsize; /* NVM word size */ 580 int sc_ich8_flash_base; 581 int sc_ich8_flash_bank_size; 582 int sc_nvm_k1_enabled; 583 584 int sc_nqueues; 585 struct wm_queue *sc_queue; 586 u_int sc_tx_process_limit; /* Tx proc. repeat limit in softint */ 587 u_int sc_tx_intr_process_limit; /* Tx proc. repeat limit in H/W intr */ 588 u_int sc_rx_process_limit; /* Rx proc. repeat limit in softint */ 589 u_int sc_rx_intr_process_limit; /* Rx proc. repeat limit in H/W intr */ 590 struct workqueue *sc_queue_wq; 591 bool sc_txrx_use_workqueue; 592 593 int sc_affinity_offset; 594 595 #ifdef WM_EVENT_COUNTERS 596 /* Event counters. */ 597 struct evcnt sc_ev_linkintr; /* Link interrupts */ 598 599 /* WM_T_82542_2_1 only */ 600 struct evcnt sc_ev_tx_xoff; /* Tx PAUSE(!0) frames */ 601 struct evcnt sc_ev_tx_xon; /* Tx PAUSE(0) frames */ 602 struct evcnt sc_ev_rx_xoff; /* Rx PAUSE(!0) frames */ 603 struct evcnt sc_ev_rx_xon; /* Rx PAUSE(0) frames */ 604 struct evcnt sc_ev_rx_macctl; /* Rx Unsupported */ 605 #endif /* WM_EVENT_COUNTERS */ 606 607 struct sysctllog *sc_sysctllog; 608 609 /* This variable are used only on the 82547. 
*/ 610 callout_t sc_txfifo_ch; /* Tx FIFO stall work-around timer */ 611 612 uint32_t sc_ctrl; /* prototype CTRL register */ 613 #if 0 614 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */ 615 #endif 616 uint32_t sc_icr; /* prototype interrupt bits */ 617 uint32_t sc_itr_init; /* prototype intr throttling reg */ 618 uint32_t sc_tctl; /* prototype TCTL register */ 619 uint32_t sc_rctl; /* prototype RCTL register */ 620 uint32_t sc_txcw; /* prototype TXCW register */ 621 uint32_t sc_tipg; /* prototype TIPG register */ 622 uint32_t sc_fcrtl; /* prototype FCRTL register */ 623 uint32_t sc_pba; /* prototype PBA register */ 624 625 int sc_tbi_linkup; /* TBI link status */ 626 int sc_tbi_serdes_anegticks; /* autonegotiation ticks */ 627 int sc_tbi_serdes_ticks; /* tbi ticks */ 628 629 int sc_mchash_type; /* multicast filter offset */ 630 631 krndsource_t rnd_source; /* random source */ 632 633 struct if_percpuq *sc_ipq; /* softint-based input queues */ 634 635 kmutex_t *sc_core_lock; /* lock for softc operations */ 636 kmutex_t *sc_ich_phymtx; /* 637 * 82574/82583/ICH/PCH specific PHY 638 * mutex. For 82574/82583, the mutex 639 * is used for both PHY and NVM. 640 */ 641 kmutex_t *sc_ich_nvmmtx; /* ICH/PCH specific NVM mutex */ 642 643 struct wm_phyop phy; 644 struct wm_nvmop nvm; 645 #ifdef WM_DEBUG 646 uint32_t sc_debug; 647 #endif 648 }; 649 650 #define WM_CORE_LOCK(_sc) \ 651 if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock) 652 #define WM_CORE_UNLOCK(_sc) \ 653 if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock) 654 #define WM_CORE_LOCKED(_sc) \ 655 (!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock)) 656 657 #define WM_RXCHAIN_RESET(rxq) \ 658 do { \ 659 (rxq)->rxq_tailp = &(rxq)->rxq_head; \ 660 *(rxq)->rxq_tailp = NULL; \ 661 (rxq)->rxq_len = 0; \ 662 } while (/*CONSTCOND*/0) 663 664 #define WM_RXCHAIN_LINK(rxq, m) \ 665 do { \ 666 *(rxq)->rxq_tailp = (rxq)->rxq_tail = (m); \ 667 (rxq)->rxq_tailp = &(m)->m_next; \ 668 } while (/*CONSTCOND*/0) 669 670 #ifdef WM_EVENT_COUNTERS 671 #ifdef __HAVE_ATOMIC64_LOADSTORE 672 #define WM_EVCNT_INCR(ev) \ 673 atomic_store_relaxed(&((ev)->ev_count), \ 674 atomic_load_relaxed(&(ev)->ev_count) + 1) 675 #define WM_EVCNT_ADD(ev, val) \ 676 atomic_store_relaxed(&((ev)->ev_count), \ 677 atomic_load_relaxed(&(ev)->ev_count) + (val)) 678 #else 679 #define WM_EVCNT_INCR(ev) \ 680 ((ev)->ev_count)++ 681 #define WM_EVCNT_ADD(ev, val) \ 682 (ev)->ev_count += (val) 683 #endif 684 685 #define WM_Q_EVCNT_INCR(qname, evname) \ 686 WM_EVCNT_INCR(&(qname)->qname##_ev_##evname) 687 #define WM_Q_EVCNT_ADD(qname, evname, val) \ 688 WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val)) 689 #else /* !WM_EVENT_COUNTERS */ 690 #define WM_EVCNT_INCR(ev) /* nothing */ 691 #define WM_EVCNT_ADD(ev, val) /* nothing */ 692 693 #define WM_Q_EVCNT_INCR(qname, evname) /* nothing */ 694 #define WM_Q_EVCNT_ADD(qname, evname, val) /* nothing */ 695 #endif /* !WM_EVENT_COUNTERS */ 696 697 #define CSR_READ(sc, reg) \ 698 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg)) 699 #define CSR_WRITE(sc, reg, val) \ 700 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val)) 701 #define CSR_WRITE_FLUSH(sc) \ 702 (void)CSR_READ((sc), WMREG_STATUS) 703 704 #define ICH8_FLASH_READ32(sc, reg) \ 705 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, \ 706 (reg) + sc->sc_flashreg_offset) 707 #define ICH8_FLASH_WRITE32(sc, reg, data) \ 708 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, \ 709 (reg) + sc->sc_flashreg_offset, (data)) 710 711 #define ICH8_FLASH_READ16(sc, reg) \ 712 
bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, \ 713 (reg) + sc->sc_flashreg_offset) 714 #define ICH8_FLASH_WRITE16(sc, reg, data) \ 715 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, \ 716 (reg) + sc->sc_flashreg_offset, (data)) 717 718 #define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x))) 719 #define WM_CDRXADDR(rxq, x) ((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x))) 720 721 #define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU) 722 #define WM_CDTXADDR_HI(txq, x) \ 723 (sizeof(bus_addr_t) == 8 ? \ 724 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0) 725 726 #define WM_CDRXADDR_LO(rxq, x) (WM_CDRXADDR((rxq), (x)) & 0xffffffffU) 727 #define WM_CDRXADDR_HI(rxq, x) \ 728 (sizeof(bus_addr_t) == 8 ? \ 729 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0) 730 731 /* 732 * Register read/write functions. 733 * Other than CSR_{READ|WRITE}(). 734 */ 735 #if 0 736 static inline uint32_t wm_io_read(struct wm_softc *, int); 737 #endif 738 static inline void wm_io_write(struct wm_softc *, int, uint32_t); 739 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t, 740 uint32_t, uint32_t); 741 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t); 742 743 /* 744 * Descriptor sync/init functions. 745 */ 746 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int); 747 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int); 748 static inline void wm_init_rxdesc(struct wm_rxqueue *, int); 749 750 /* 751 * Device driver interface functions and commonly used functions. 752 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 753 */ 754 static const struct wm_product *wm_lookup(const struct pci_attach_args *); 755 static int wm_match(device_t, cfdata_t, void *); 756 static void wm_attach(device_t, device_t, void *); 757 static int wm_detach(device_t, int); 758 static bool wm_suspend(device_t, const pmf_qual_t *); 759 static bool wm_resume(device_t, const pmf_qual_t *); 760 static void wm_watchdog(struct ifnet *); 761 static void wm_watchdog_txq(struct ifnet *, struct wm_txqueue *, 762 uint16_t *); 763 static void wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *, 764 uint16_t *); 765 static void wm_tick(void *); 766 static int wm_ifflags_cb(struct ethercom *); 767 static int wm_ioctl(struct ifnet *, u_long, void *); 768 /* MAC address related */ 769 static uint16_t wm_check_alt_mac_addr(struct wm_softc *); 770 static int wm_read_mac_addr(struct wm_softc *, uint8_t *); 771 static void wm_set_ral(struct wm_softc *, const uint8_t *, int); 772 static uint32_t wm_mchash(struct wm_softc *, const uint8_t *); 773 static int wm_rar_count(struct wm_softc *); 774 static void wm_set_filter(struct wm_softc *); 775 /* Reset and init related */ 776 static void wm_set_vlan(struct wm_softc *); 777 static void wm_set_pcie_completion_timeout(struct wm_softc *); 778 static void wm_get_auto_rd_done(struct wm_softc *); 779 static void wm_lan_init_done(struct wm_softc *); 780 static void wm_get_cfg_done(struct wm_softc *); 781 static int wm_phy_post_reset(struct wm_softc *); 782 static int wm_write_smbus_addr(struct wm_softc *); 783 static int wm_init_lcd_from_nvm(struct wm_softc *); 784 static int wm_oem_bits_config_ich8lan(struct wm_softc *, bool); 785 static void wm_initialize_hardware_bits(struct wm_softc *); 786 static uint32_t wm_rxpbs_adjust_82580(uint32_t); 787 static int wm_reset_phy(struct wm_softc *); 788 static void wm_flush_desc_rings(struct wm_softc *); 789 static void wm_reset(struct wm_softc *); 
790 static int wm_add_rxbuf(struct wm_rxqueue *, int); 791 static void wm_rxdrain(struct wm_rxqueue *); 792 static void wm_init_rss(struct wm_softc *); 793 static void wm_adjust_qnum(struct wm_softc *, int); 794 static inline bool wm_is_using_msix(struct wm_softc *); 795 static inline bool wm_is_using_multiqueue(struct wm_softc *); 796 static int wm_softint_establish_queue(struct wm_softc *, int, int); 797 static int wm_setup_legacy(struct wm_softc *); 798 static int wm_setup_msix(struct wm_softc *); 799 static int wm_init(struct ifnet *); 800 static int wm_init_locked(struct ifnet *); 801 static void wm_init_sysctls(struct wm_softc *); 802 static void wm_unset_stopping_flags(struct wm_softc *); 803 static void wm_set_stopping_flags(struct wm_softc *); 804 static void wm_stop(struct ifnet *, int); 805 static void wm_stop_locked(struct ifnet *, bool, bool); 806 static void wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *); 807 static void wm_82547_txfifo_stall(void *); 808 static int wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *); 809 static void wm_itrs_writereg(struct wm_softc *, struct wm_queue *); 810 /* DMA related */ 811 static int wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *); 812 static void wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *); 813 static void wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *); 814 static void wm_init_tx_regs(struct wm_softc *, struct wm_queue *, 815 struct wm_txqueue *); 816 static int wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *); 817 static void wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *); 818 static void wm_init_rx_regs(struct wm_softc *, struct wm_queue *, 819 struct wm_rxqueue *); 820 static int wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *); 821 static void wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *); 822 static void wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *); 823 static int wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 824 static void wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 825 static int wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *); 826 static void wm_init_tx_queue(struct wm_softc *, struct wm_queue *, 827 struct wm_txqueue *); 828 static int wm_init_rx_queue(struct wm_softc *, struct wm_queue *, 829 struct wm_rxqueue *); 830 static int wm_alloc_txrx_queues(struct wm_softc *); 831 static void wm_free_txrx_queues(struct wm_softc *); 832 static int wm_init_txrx_queues(struct wm_softc *); 833 /* Start */ 834 static void wm_tx_offload(struct wm_softc *, struct wm_txqueue *, 835 struct wm_txsoft *, uint32_t *, uint8_t *); 836 static inline int wm_select_txqueue(struct ifnet *, struct mbuf *); 837 static void wm_start(struct ifnet *); 838 static void wm_start_locked(struct ifnet *); 839 static int wm_transmit(struct ifnet *, struct mbuf *); 840 static void wm_transmit_locked(struct ifnet *, struct wm_txqueue *); 841 static void wm_send_common_locked(struct ifnet *, struct wm_txqueue *, 842 bool); 843 static void wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *, 844 struct wm_txsoft *, uint32_t *, uint32_t *, bool *); 845 static void wm_nq_start(struct ifnet *); 846 static void wm_nq_start_locked(struct ifnet *); 847 static int wm_nq_transmit(struct ifnet *, struct mbuf *); 848 static void wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *); 849 static void wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, 850 bool); 851 static void wm_deferred_start_locked(struct wm_txqueue 
*); 852 static void wm_handle_queue(void *); 853 static void wm_handle_queue_work(struct work *, void *); 854 /* Interrupt */ 855 static bool wm_txeof(struct wm_txqueue *, u_int); 856 static bool wm_rxeof(struct wm_rxqueue *, u_int); 857 static void wm_linkintr_gmii(struct wm_softc *, uint32_t); 858 static void wm_linkintr_tbi(struct wm_softc *, uint32_t); 859 static void wm_linkintr_serdes(struct wm_softc *, uint32_t); 860 static void wm_linkintr(struct wm_softc *, uint32_t); 861 static int wm_intr_legacy(void *); 862 static inline void wm_txrxintr_disable(struct wm_queue *); 863 static inline void wm_txrxintr_enable(struct wm_queue *); 864 static void wm_itrs_calculate(struct wm_softc *, struct wm_queue *); 865 static int wm_txrxintr_msix(void *); 866 static int wm_linkintr_msix(void *); 867 868 /* 869 * Media related. 870 * GMII, SGMII, TBI, SERDES and SFP. 871 */ 872 /* Common */ 873 static void wm_tbi_serdes_set_linkled(struct wm_softc *); 874 /* GMII related */ 875 static void wm_gmii_reset(struct wm_softc *); 876 static void wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t); 877 static int wm_get_phy_id_82575(struct wm_softc *); 878 static void wm_gmii_mediainit(struct wm_softc *, pci_product_id_t); 879 static int wm_gmii_mediachange(struct ifnet *); 880 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *); 881 static void wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int); 882 static uint16_t wm_i82543_mii_recvbits(struct wm_softc *); 883 static int wm_gmii_i82543_readreg(device_t, int, int, uint16_t *); 884 static int wm_gmii_i82543_writereg(device_t, int, int, uint16_t); 885 static int wm_gmii_mdic_readreg(device_t, int, int, uint16_t *); 886 static int wm_gmii_mdic_writereg(device_t, int, int, uint16_t); 887 static int wm_gmii_i82544_readreg(device_t, int, int, uint16_t *); 888 static int wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *); 889 static int wm_gmii_i82544_writereg(device_t, int, int, uint16_t); 890 static int wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t); 891 static int wm_gmii_i80003_readreg(device_t, int, int, uint16_t *); 892 static int wm_gmii_i80003_writereg(device_t, int, int, uint16_t); 893 static int wm_gmii_bm_readreg(device_t, int, int, uint16_t *); 894 static int wm_gmii_bm_writereg(device_t, int, int, uint16_t); 895 static int wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 896 static int wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *); 897 static int wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int, 898 bool); 899 static int wm_gmii_hv_readreg(device_t, int, int, uint16_t *); 900 static int wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *); 901 static int wm_gmii_hv_writereg(device_t, int, int, uint16_t); 902 static int wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t); 903 static int wm_gmii_82580_readreg(device_t, int, int, uint16_t *); 904 static int wm_gmii_82580_writereg(device_t, int, int, uint16_t); 905 static int wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *); 906 static int wm_gmii_gs40g_writereg(device_t, int, int, uint16_t); 907 static void wm_gmii_statchg(struct ifnet *); 908 /* 909 * kumeran related (80003, ICH* and PCH*). 910 * These functions are not for accessing MII registers but for accessing 911 * kumeran specific registers. 
912 */ 913 static int wm_kmrn_readreg(struct wm_softc *, int, uint16_t *); 914 static int wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *); 915 static int wm_kmrn_writereg(struct wm_softc *, int, uint16_t); 916 static int wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t); 917 /* EMI register related */ 918 static int wm_access_emi_reg_locked(device_t, int, uint16_t *, bool); 919 static int wm_read_emi_reg_locked(device_t, int, uint16_t *); 920 static int wm_write_emi_reg_locked(device_t, int, uint16_t); 921 /* SGMII */ 922 static bool wm_sgmii_uses_mdio(struct wm_softc *); 923 static void wm_sgmii_sfp_preconfig(struct wm_softc *); 924 static int wm_sgmii_readreg(device_t, int, int, uint16_t *); 925 static int wm_sgmii_readreg_locked(device_t, int, int, uint16_t *); 926 static int wm_sgmii_writereg(device_t, int, int, uint16_t); 927 static int wm_sgmii_writereg_locked(device_t, int, int, uint16_t); 928 /* TBI related */ 929 static bool wm_tbi_havesignal(struct wm_softc *, uint32_t); 930 static void wm_tbi_mediainit(struct wm_softc *); 931 static int wm_tbi_mediachange(struct ifnet *); 932 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *); 933 static int wm_check_for_link(struct wm_softc *); 934 static void wm_tbi_tick(struct wm_softc *); 935 /* SERDES related */ 936 static void wm_serdes_power_up_link_82575(struct wm_softc *); 937 static int wm_serdes_mediachange(struct ifnet *); 938 static void wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *); 939 static void wm_serdes_tick(struct wm_softc *); 940 /* SFP related */ 941 static int wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *); 942 static uint32_t wm_sfp_get_media_type(struct wm_softc *); 943 944 /* 945 * NVM related. 946 * Microwire, SPI (w/wo EERD) and Flash. 
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
		    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
		    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);   /* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
1013 */ 1014 #if 0 1015 static int wm_check_mng_mode(struct wm_softc *); 1016 static int wm_check_mng_mode_ich8lan(struct wm_softc *); 1017 static int wm_check_mng_mode_82574(struct wm_softc *); 1018 static int wm_check_mng_mode_generic(struct wm_softc *); 1019 #endif 1020 static int wm_enable_mng_pass_thru(struct wm_softc *); 1021 static bool wm_phy_resetisblocked(struct wm_softc *); 1022 static void wm_get_hw_control(struct wm_softc *); 1023 static void wm_release_hw_control(struct wm_softc *); 1024 static void wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool); 1025 static int wm_init_phy_workarounds_pchlan(struct wm_softc *); 1026 static void wm_init_manageability(struct wm_softc *); 1027 static void wm_release_manageability(struct wm_softc *); 1028 static void wm_get_wakeup(struct wm_softc *); 1029 static int wm_ulp_disable(struct wm_softc *); 1030 static int wm_enable_phy_wakeup(struct wm_softc *); 1031 static void wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *); 1032 static void wm_suspend_workarounds_ich8lan(struct wm_softc *); 1033 static int wm_resume_workarounds_pchlan(struct wm_softc *); 1034 static void wm_enable_wakeup(struct wm_softc *); 1035 static void wm_disable_aspm(struct wm_softc *); 1036 /* LPLU (Low Power Link Up) */ 1037 static void wm_lplu_d0_disable(struct wm_softc *); 1038 /* EEE */ 1039 static int wm_set_eee_i350(struct wm_softc *); 1040 static int wm_set_eee_pchlan(struct wm_softc *); 1041 static int wm_set_eee(struct wm_softc *); 1042 1043 /* 1044 * Workarounds (mainly PHY related). 1045 * Basically, PHY's workarounds are in the PHY drivers. 1046 */ 1047 static int wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *); 1048 static void wm_gig_downshift_workaround_ich8lan(struct wm_softc *); 1049 static int wm_hv_phy_workarounds_ich8lan(struct wm_softc *); 1050 static void wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *); 1051 static void wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *); 1052 static int wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool); 1053 static int wm_lv_phy_workarounds_ich8lan(struct wm_softc *); 1054 static int wm_k1_workaround_lpt_lp(struct wm_softc *, bool); 1055 static int wm_k1_gig_workaround_hv(struct wm_softc *, int); 1056 static int wm_k1_workaround_lv(struct wm_softc *); 1057 static int wm_link_stall_workaround_hv(struct wm_softc *); 1058 static int wm_set_mdio_slow_mode_hv(struct wm_softc *); 1059 static void wm_configure_k1_ich8lan(struct wm_softc *, int); 1060 static void wm_reset_init_script_82575(struct wm_softc *); 1061 static void wm_reset_mdicnfg_82580(struct wm_softc *); 1062 static bool wm_phy_is_accessible_pchlan(struct wm_softc *); 1063 static void wm_toggle_lanphypc_pch_lpt(struct wm_softc *); 1064 static int wm_platform_pm_pch_lpt(struct wm_softc *, bool); 1065 static int wm_pll_workaround_i210(struct wm_softc *); 1066 static void wm_legacy_irq_quirk_spt(struct wm_softc *); 1067 static bool wm_phy_need_linkdown_discard(struct wm_softc *); 1068 static void wm_set_linkdown_discard(struct wm_softc *); 1069 static void wm_clear_linkdown_discard(struct wm_softc *); 1070 1071 static int wm_sysctl_tdh_handler(SYSCTLFN_PROTO); 1072 static int wm_sysctl_tdt_handler(SYSCTLFN_PROTO); 1073 #ifdef WM_DEBUG 1074 static int wm_sysctl_debug(SYSCTLFN_PROTO); 1075 #endif 1076 1077 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc), 1078 wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN); 1079 1080 /* 1081 * Devices supported by this driver. 
1082 */ 1083 static const struct wm_product { 1084 pci_vendor_id_t wmp_vendor; 1085 pci_product_id_t wmp_product; 1086 const char *wmp_name; 1087 wm_chip_type wmp_type; 1088 uint32_t wmp_flags; 1089 #define WMP_F_UNKNOWN WM_MEDIATYPE_UNKNOWN 1090 #define WMP_F_FIBER WM_MEDIATYPE_FIBER 1091 #define WMP_F_COPPER WM_MEDIATYPE_COPPER 1092 #define WMP_F_SERDES WM_MEDIATYPE_SERDES 1093 #define WMP_MEDIATYPE(x) ((x) & 0x03) 1094 } wm_products[] = { 1095 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542, 1096 "Intel i82542 1000BASE-X Ethernet", 1097 WM_T_82542_2_1, WMP_F_FIBER }, 1098 1099 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER, 1100 "Intel i82543GC 1000BASE-X Ethernet", 1101 WM_T_82543, WMP_F_FIBER }, 1102 1103 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER, 1104 "Intel i82543GC 1000BASE-T Ethernet", 1105 WM_T_82543, WMP_F_COPPER }, 1106 1107 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER, 1108 "Intel i82544EI 1000BASE-T Ethernet", 1109 WM_T_82544, WMP_F_COPPER }, 1110 1111 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER, 1112 "Intel i82544EI 1000BASE-X Ethernet", 1113 WM_T_82544, WMP_F_FIBER }, 1114 1115 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER, 1116 "Intel i82544GC 1000BASE-T Ethernet", 1117 WM_T_82544, WMP_F_COPPER }, 1118 1119 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM, 1120 "Intel i82544GC (LOM) 1000BASE-T Ethernet", 1121 WM_T_82544, WMP_F_COPPER }, 1122 1123 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM, 1124 "Intel i82540EM 1000BASE-T Ethernet", 1125 WM_T_82540, WMP_F_COPPER }, 1126 1127 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM, 1128 "Intel i82540EM (LOM) 1000BASE-T Ethernet", 1129 WM_T_82540, WMP_F_COPPER }, 1130 1131 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM, 1132 "Intel i82540EP 1000BASE-T Ethernet", 1133 WM_T_82540, WMP_F_COPPER }, 1134 1135 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP, 1136 "Intel i82540EP 1000BASE-T Ethernet", 1137 WM_T_82540, WMP_F_COPPER }, 1138 1139 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP, 1140 "Intel i82540EP 1000BASE-T Ethernet", 1141 WM_T_82540, WMP_F_COPPER }, 1142 1143 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER, 1144 "Intel i82545EM 1000BASE-T Ethernet", 1145 WM_T_82545, WMP_F_COPPER }, 1146 1147 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER, 1148 "Intel i82545GM 1000BASE-T Ethernet", 1149 WM_T_82545_3, WMP_F_COPPER }, 1150 1151 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER, 1152 "Intel i82545GM 1000BASE-X Ethernet", 1153 WM_T_82545_3, WMP_F_FIBER }, 1154 1155 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES, 1156 "Intel i82545GM Gigabit Ethernet (SERDES)", 1157 WM_T_82545_3, WMP_F_SERDES }, 1158 1159 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER, 1160 "Intel i82546EB 1000BASE-T Ethernet", 1161 WM_T_82546, WMP_F_COPPER }, 1162 1163 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD, 1164 "Intel i82546EB 1000BASE-T Ethernet", 1165 WM_T_82546, WMP_F_COPPER }, 1166 1167 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER, 1168 "Intel i82545EM 1000BASE-X Ethernet", 1169 WM_T_82545, WMP_F_FIBER }, 1170 1171 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER, 1172 "Intel i82546EB 1000BASE-X Ethernet", 1173 WM_T_82546, WMP_F_FIBER }, 1174 1175 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER, 1176 "Intel i82546GB 1000BASE-T Ethernet", 1177 WM_T_82546_3, WMP_F_COPPER }, 1178 1179 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER, 1180 "Intel i82546GB 1000BASE-X Ethernet", 1181 WM_T_82546_3, WMP_F_FIBER }, 1182 1183 { PCI_VENDOR_INTEL, 
PCI_PRODUCT_INTEL_82546GB_SERDES, 1184 "Intel i82546GB Gigabit Ethernet (SERDES)", 1185 WM_T_82546_3, WMP_F_SERDES }, 1186 1187 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER, 1188 "i82546GB quad-port Gigabit Ethernet", 1189 WM_T_82546_3, WMP_F_COPPER }, 1190 1191 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3, 1192 "i82546GB quad-port Gigabit Ethernet (KSP3)", 1193 WM_T_82546_3, WMP_F_COPPER }, 1194 1195 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE, 1196 "Intel PRO/1000MT (82546GB)", 1197 WM_T_82546_3, WMP_F_COPPER }, 1198 1199 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI, 1200 "Intel i82541EI 1000BASE-T Ethernet", 1201 WM_T_82541, WMP_F_COPPER }, 1202 1203 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM, 1204 "Intel i82541ER (LOM) 1000BASE-T Ethernet", 1205 WM_T_82541, WMP_F_COPPER }, 1206 1207 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE, 1208 "Intel i82541EI Mobile 1000BASE-T Ethernet", 1209 WM_T_82541, WMP_F_COPPER }, 1210 1211 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER, 1212 "Intel i82541ER 1000BASE-T Ethernet", 1213 WM_T_82541_2, WMP_F_COPPER }, 1214 1215 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI, 1216 "Intel i82541GI 1000BASE-T Ethernet", 1217 WM_T_82541_2, WMP_F_COPPER }, 1218 1219 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE, 1220 "Intel i82541GI Mobile 1000BASE-T Ethernet", 1221 WM_T_82541_2, WMP_F_COPPER }, 1222 1223 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI, 1224 "Intel i82541PI 1000BASE-T Ethernet", 1225 WM_T_82541_2, WMP_F_COPPER }, 1226 1227 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI, 1228 "Intel i82547EI 1000BASE-T Ethernet", 1229 WM_T_82547, WMP_F_COPPER }, 1230 1231 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE, 1232 "Intel i82547EI Mobile 1000BASE-T Ethernet", 1233 WM_T_82547, WMP_F_COPPER }, 1234 1235 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI, 1236 "Intel i82547GI 1000BASE-T Ethernet", 1237 WM_T_82547_2, WMP_F_COPPER }, 1238 1239 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER, 1240 "Intel PRO/1000 PT (82571EB)", 1241 WM_T_82571, WMP_F_COPPER }, 1242 1243 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER, 1244 "Intel PRO/1000 PF (82571EB)", 1245 WM_T_82571, WMP_F_FIBER }, 1246 1247 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES, 1248 "Intel PRO/1000 PB (82571EB)", 1249 WM_T_82571, WMP_F_SERDES }, 1250 1251 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER, 1252 "Intel PRO/1000 QT (82571EB)", 1253 WM_T_82571, WMP_F_COPPER }, 1254 1255 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER, 1256 "Intel PRO/1000 PT Quad Port Server Adapter", 1257 WM_T_82571, WMP_F_COPPER }, 1258 1259 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER, 1260 "Intel Gigabit PT Quad Port Server ExpressModule", 1261 WM_T_82571, WMP_F_COPPER }, 1262 1263 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES, 1264 "Intel 82571EB Dual Gigabit Ethernet (SERDES)", 1265 WM_T_82571, WMP_F_SERDES }, 1266 1267 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES, 1268 "Intel 82571EB Quad Gigabit Ethernet (SERDES)", 1269 WM_T_82571, WMP_F_SERDES }, 1270 1271 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER, 1272 "Intel 82571EB Quad 1000baseX Ethernet", 1273 WM_T_82571, WMP_F_FIBER }, 1274 1275 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER, 1276 "Intel i82572EI 1000baseT Ethernet", 1277 WM_T_82572, WMP_F_COPPER }, 1278 1279 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER, 1280 "Intel i82572EI 1000baseX Ethernet", 1281 WM_T_82572, WMP_F_FIBER }, 1282 1283 { 
PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES, 1284 "Intel i82572EI Gigabit Ethernet (SERDES)", 1285 WM_T_82572, WMP_F_SERDES }, 1286 1287 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI, 1288 "Intel i82572EI 1000baseT Ethernet", 1289 WM_T_82572, WMP_F_COPPER }, 1290 1291 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E, 1292 "Intel i82573E", 1293 WM_T_82573, WMP_F_COPPER }, 1294 1295 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT, 1296 "Intel i82573E IAMT", 1297 WM_T_82573, WMP_F_COPPER }, 1298 1299 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L, 1300 "Intel i82573L Gigabit Ethernet", 1301 WM_T_82573, WMP_F_COPPER }, 1302 1303 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L, 1304 "Intel i82574L", 1305 WM_T_82574, WMP_F_COPPER }, 1306 1307 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574LA, 1308 "Intel i82574L", 1309 WM_T_82574, WMP_F_COPPER }, 1310 1311 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V, 1312 "Intel i82583V", 1313 WM_T_82583, WMP_F_COPPER }, 1314 1315 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT, 1316 "i80003 dual 1000baseT Ethernet", 1317 WM_T_80003, WMP_F_COPPER }, 1318 1319 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT, 1320 "i80003 dual 1000baseX Ethernet", 1321 WM_T_80003, WMP_F_COPPER }, 1322 1323 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT, 1324 "Intel i80003ES2 dual Gigabit Ethernet (SERDES)", 1325 WM_T_80003, WMP_F_SERDES }, 1326 1327 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT, 1328 "Intel i80003 1000baseT Ethernet", 1329 WM_T_80003, WMP_F_COPPER }, 1330 1331 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT, 1332 "Intel i80003 Gigabit Ethernet (SERDES)", 1333 WM_T_80003, WMP_F_SERDES }, 1334 1335 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT, 1336 "Intel i82801H (M_AMT) LAN Controller", 1337 WM_T_ICH8, WMP_F_COPPER }, 1338 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT, 1339 "Intel i82801H (AMT) LAN Controller", 1340 WM_T_ICH8, WMP_F_COPPER }, 1341 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN, 1342 "Intel i82801H LAN Controller", 1343 WM_T_ICH8, WMP_F_COPPER }, 1344 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN, 1345 "Intel i82801H (IFE) 10/100 LAN Controller", 1346 WM_T_ICH8, WMP_F_COPPER }, 1347 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN, 1348 "Intel i82801H (M) LAN Controller", 1349 WM_T_ICH8, WMP_F_COPPER }, 1350 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT, 1351 "Intel i82801H IFE (GT) 10/100 LAN Controller", 1352 WM_T_ICH8, WMP_F_COPPER }, 1353 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G, 1354 "Intel i82801H IFE (G) 10/100 LAN Controller", 1355 WM_T_ICH8, WMP_F_COPPER }, 1356 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_82567V_3, 1357 "82567V-3 LAN Controller", 1358 WM_T_ICH8, WMP_F_COPPER }, 1359 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT, 1360 "82801I (AMT) LAN Controller", 1361 WM_T_ICH9, WMP_F_COPPER }, 1362 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE, 1363 "82801I 10/100 LAN Controller", 1364 WM_T_ICH9, WMP_F_COPPER }, 1365 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G, 1366 "82801I (G) 10/100 LAN Controller", 1367 WM_T_ICH9, WMP_F_COPPER }, 1368 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT, 1369 "82801I (GT) 10/100 LAN Controller", 1370 WM_T_ICH9, WMP_F_COPPER }, 1371 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C, 1372 "82801I (C) LAN Controller", 1373 WM_T_ICH9, WMP_F_COPPER }, 1374 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M, 1375 "82801I mobile LAN Controller", 1376 WM_T_ICH9, WMP_F_COPPER }, 1377 { PCI_VENDOR_INTEL, 
PCI_PRODUCT_INTEL_82801I_IGP_M_V, 1378 "82801I mobile (V) LAN Controller", 1379 WM_T_ICH9, WMP_F_COPPER }, 1380 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT, 1381 "82801I mobile (AMT) LAN Controller", 1382 WM_T_ICH9, WMP_F_COPPER }, 1383 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM, 1384 "82567LM-4 LAN Controller", 1385 WM_T_ICH9, WMP_F_COPPER }, 1386 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM, 1387 "82567LM-2 LAN Controller", 1388 WM_T_ICH10, WMP_F_COPPER }, 1389 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF, 1390 "82567LF-2 LAN Controller", 1391 WM_T_ICH10, WMP_F_COPPER }, 1392 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM, 1393 "82567LM-3 LAN Controller", 1394 WM_T_ICH10, WMP_F_COPPER }, 1395 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF, 1396 "82567LF-3 LAN Controller", 1397 WM_T_ICH10, WMP_F_COPPER }, 1398 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V, 1399 "82567V-2 LAN Controller", 1400 WM_T_ICH10, WMP_F_COPPER }, 1401 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_V, 1402 "82567V-3? LAN Controller", 1403 WM_T_ICH10, WMP_F_COPPER }, 1404 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_HANKSVILLE, 1405 "HANKSVILLE LAN Controller", 1406 WM_T_ICH10, WMP_F_COPPER }, 1407 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM, 1408 "PCH LAN (82577LM) Controller", 1409 WM_T_PCH, WMP_F_COPPER }, 1410 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC, 1411 "PCH LAN (82577LC) Controller", 1412 WM_T_PCH, WMP_F_COPPER }, 1413 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM, 1414 "PCH LAN (82578DM) Controller", 1415 WM_T_PCH, WMP_F_COPPER }, 1416 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC, 1417 "PCH LAN (82578DC) Controller", 1418 WM_T_PCH, WMP_F_COPPER }, 1419 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_LM, 1420 "PCH2 LAN (82579LM) Controller", 1421 WM_T_PCH2, WMP_F_COPPER }, 1422 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH2_LV_V, 1423 "PCH2 LAN (82579V) Controller", 1424 WM_T_PCH2, WMP_F_COPPER }, 1425 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER, 1426 "82575EB dual-1000baseT Ethernet", 1427 WM_T_82575, WMP_F_COPPER }, 1428 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES, 1429 "82575EB dual-1000baseX Ethernet (SERDES)", 1430 WM_T_82575, WMP_F_SERDES }, 1431 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER, 1432 "82575GB quad-1000baseT Ethernet", 1433 WM_T_82575, WMP_F_COPPER }, 1434 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM, 1435 "82575GB quad-1000baseT Ethernet (PM)", 1436 WM_T_82575, WMP_F_COPPER }, 1437 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER, 1438 "82576 1000BaseT Ethernet", 1439 WM_T_82576, WMP_F_COPPER }, 1440 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER, 1441 "82576 1000BaseX Ethernet", 1442 WM_T_82576, WMP_F_FIBER }, 1443 1444 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES, 1445 "82576 gigabit Ethernet (SERDES)", 1446 WM_T_82576, WMP_F_SERDES }, 1447 1448 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER, 1449 "82576 quad-1000BaseT Ethernet", 1450 WM_T_82576, WMP_F_COPPER }, 1451 1452 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2, 1453 "82576 Gigabit ET2 Quad Port Server Adapter", 1454 WM_T_82576, WMP_F_COPPER }, 1455 1456 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS, 1457 "82576 gigabit Ethernet", 1458 WM_T_82576, WMP_F_COPPER }, 1459 1460 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES, 1461 "82576 gigabit Ethernet (SERDES)", 1462 WM_T_82576, WMP_F_SERDES }, 1463 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD, 1464 "82576 quad-gigabit Ethernet 
(SERDES)", 1465 WM_T_82576, WMP_F_SERDES }, 1466 1467 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER, 1468 "82580 1000BaseT Ethernet", 1469 WM_T_82580, WMP_F_COPPER }, 1470 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER, 1471 "82580 1000BaseX Ethernet", 1472 WM_T_82580, WMP_F_FIBER }, 1473 1474 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES, 1475 "82580 1000BaseT Ethernet (SERDES)", 1476 WM_T_82580, WMP_F_SERDES }, 1477 1478 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII, 1479 "82580 gigabit Ethernet (SGMII)", 1480 WM_T_82580, WMP_F_COPPER }, 1481 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL, 1482 "82580 dual-1000BaseT Ethernet", 1483 WM_T_82580, WMP_F_COPPER }, 1484 1485 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_QUAD_FIBER, 1486 "82580 quad-1000BaseX Ethernet", 1487 WM_T_82580, WMP_F_FIBER }, 1488 1489 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SGMII, 1490 "DH89XXCC Gigabit Ethernet (SGMII)", 1491 WM_T_82580, WMP_F_COPPER }, 1492 1493 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SERDES, 1494 "DH89XXCC Gigabit Ethernet (SERDES)", 1495 WM_T_82580, WMP_F_SERDES }, 1496 1497 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_BPLANE, 1498 "DH89XXCC 1000BASE-KX Ethernet", 1499 WM_T_82580, WMP_F_SERDES }, 1500 1501 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH89XXCC_SFP, 1502 "DH89XXCC Gigabit Ethernet (SFP)", 1503 WM_T_82580, WMP_F_SERDES }, 1504 1505 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_COPPER, 1506 "I350 Gigabit Network Connection", 1507 WM_T_I350, WMP_F_COPPER }, 1508 1509 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_FIBER, 1510 "I350 Gigabit Fiber Network Connection", 1511 WM_T_I350, WMP_F_FIBER }, 1512 1513 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SERDES, 1514 "I350 Gigabit Backplane Connection", 1515 WM_T_I350, WMP_F_SERDES }, 1516 1517 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_DA4, 1518 "I350 Quad Port Gigabit Ethernet", 1519 WM_T_I350, WMP_F_SERDES }, 1520 1521 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I350_SGMII, 1522 "I350 Gigabit Connection", 1523 WM_T_I350, WMP_F_COPPER }, 1524 1525 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_1000KX, 1526 "I354 Gigabit Ethernet (KX)", 1527 WM_T_I354, WMP_F_SERDES }, 1528 1529 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_SGMII, 1530 "I354 Gigabit Ethernet (SGMII)", 1531 WM_T_I354, WMP_F_COPPER }, 1532 1533 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_25GBE, 1534 "I354 Gigabit Ethernet (2.5G)", 1535 WM_T_I354, WMP_F_COPPER }, 1536 1537 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_T1, 1538 "I210-T1 Ethernet Server Adapter", 1539 WM_T_I210, WMP_F_COPPER }, 1540 1541 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_OEM1, 1542 "I210 Ethernet (Copper OEM)", 1543 WM_T_I210, WMP_F_COPPER }, 1544 1545 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_IT, 1546 "I210 Ethernet (Copper IT)", 1547 WM_T_I210, WMP_F_COPPER }, 1548 1549 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_COPPER_WOF, 1550 "I210 Ethernet (Copper, FLASH less)", 1551 WM_T_I210, WMP_F_COPPER }, 1552 1553 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_FIBER, 1554 "I210 Gigabit Ethernet (Fiber)", 1555 WM_T_I210, WMP_F_FIBER }, 1556 1557 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES, 1558 "I210 Gigabit Ethernet (SERDES)", 1559 WM_T_I210, WMP_F_SERDES }, 1560 1561 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SERDES_WOF, 1562 "I210 Gigabit Ethernet (SERDES, FLASH less)", 1563 WM_T_I210, WMP_F_SERDES }, 1564 1565 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I210_SGMII, 1566 "I210 Gigabit Ethernet (SGMII)", 1567 WM_T_I210, WMP_F_COPPER }, 1568 1569 { PCI_VENDOR_INTEL, 
PCI_PRODUCT_INTEL_I210_SGMII_WOF, 1570 "I210 Gigabit Ethernet (SGMII, FLASH less)", 1571 WM_T_I210, WMP_F_COPPER }, 1572 1573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I211_COPPER, 1574 "I211 Ethernet (COPPER)", 1575 WM_T_I211, WMP_F_COPPER }, 1576 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_V, 1577 "I217 V Ethernet Connection", 1578 WM_T_PCH_LPT, WMP_F_COPPER }, 1579 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I217_LM, 1580 "I217 LM Ethernet Connection", 1581 WM_T_PCH_LPT, WMP_F_COPPER }, 1582 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V, 1583 "I218 V Ethernet Connection", 1584 WM_T_PCH_LPT, WMP_F_COPPER }, 1585 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V2, 1586 "I218 V Ethernet Connection", 1587 WM_T_PCH_LPT, WMP_F_COPPER }, 1588 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_V3, 1589 "I218 V Ethernet Connection", 1590 WM_T_PCH_LPT, WMP_F_COPPER }, 1591 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM, 1592 "I218 LM Ethernet Connection", 1593 WM_T_PCH_LPT, WMP_F_COPPER }, 1594 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM2, 1595 "I218 LM Ethernet Connection", 1596 WM_T_PCH_LPT, WMP_F_COPPER }, 1597 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I218_LM3, 1598 "I218 LM Ethernet Connection", 1599 WM_T_PCH_LPT, WMP_F_COPPER }, 1600 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM, 1601 "I219 LM Ethernet Connection", 1602 WM_T_PCH_SPT, WMP_F_COPPER }, 1603 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM2, 1604 "I219 LM (2) Ethernet Connection", 1605 WM_T_PCH_SPT, WMP_F_COPPER }, 1606 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM3, 1607 "I219 LM (3) Ethernet Connection", 1608 WM_T_PCH_SPT, WMP_F_COPPER }, 1609 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM4, 1610 "I219 LM (4) Ethernet Connection", 1611 WM_T_PCH_SPT, WMP_F_COPPER }, 1612 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM5, 1613 "I219 LM (5) Ethernet Connection", 1614 WM_T_PCH_SPT, WMP_F_COPPER }, 1615 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM6, 1616 "I219 LM (6) Ethernet Connection", 1617 WM_T_PCH_CNP, WMP_F_COPPER }, 1618 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM7, 1619 "I219 LM (7) Ethernet Connection", 1620 WM_T_PCH_CNP, WMP_F_COPPER }, 1621 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM8, 1622 "I219 LM (8) Ethernet Connection", 1623 WM_T_PCH_CNP, WMP_F_COPPER }, 1624 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM9, 1625 "I219 LM (9) Ethernet Connection", 1626 WM_T_PCH_CNP, WMP_F_COPPER }, 1627 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM10, 1628 "I219 LM (10) Ethernet Connection", 1629 WM_T_PCH_CNP, WMP_F_COPPER }, 1630 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM11, 1631 "I219 LM (11) Ethernet Connection", 1632 WM_T_PCH_CNP, WMP_F_COPPER }, 1633 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM12, 1634 "I219 LM (12) Ethernet Connection", 1635 WM_T_PCH_SPT, WMP_F_COPPER }, 1636 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM13, 1637 "I219 LM (13) Ethernet Connection", 1638 WM_T_PCH_CNP, WMP_F_COPPER }, 1639 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM14, 1640 "I219 LM (14) Ethernet Connection", 1641 WM_T_PCH_CNP, WMP_F_COPPER }, 1642 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM15, 1643 "I219 LM (15) Ethernet Connection", 1644 WM_T_PCH_CNP, WMP_F_COPPER }, 1645 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM16, 1646 "I219 LM (16) Ethernet Connection", 1647 WM_T_PCH_CNP, WMP_F_COPPER }, 1648 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM17, 1649 "I219 LM (17) Ethernet Connection", 1650 WM_T_PCH_CNP, WMP_F_COPPER }, 1651 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM18, 1652 "I219 LM (18) Ethernet Connection", 1653 WM_T_PCH_CNP, WMP_F_COPPER }, 1654 { 
PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_LM19, 1655 "I219 LM (19) Ethernet Connection", 1656 WM_T_PCH_CNP, WMP_F_COPPER }, 1657 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V, 1658 "I219 V Ethernet Connection", 1659 WM_T_PCH_SPT, WMP_F_COPPER }, 1660 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V2, 1661 "I219 V (2) Ethernet Connection", 1662 WM_T_PCH_SPT, WMP_F_COPPER }, 1663 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V4, 1664 "I219 V (4) Ethernet Connection", 1665 WM_T_PCH_SPT, WMP_F_COPPER }, 1666 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V5, 1667 "I219 V (5) Ethernet Connection", 1668 WM_T_PCH_SPT, WMP_F_COPPER }, 1669 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V6, 1670 "I219 V (6) Ethernet Connection", 1671 WM_T_PCH_CNP, WMP_F_COPPER }, 1672 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V7, 1673 "I219 V (7) Ethernet Connection", 1674 WM_T_PCH_CNP, WMP_F_COPPER }, 1675 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V8, 1676 "I219 V (8) Ethernet Connection", 1677 WM_T_PCH_CNP, WMP_F_COPPER }, 1678 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V9, 1679 "I219 V (9) Ethernet Connection", 1680 WM_T_PCH_CNP, WMP_F_COPPER }, 1681 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V10, 1682 "I219 V (10) Ethernet Connection", 1683 WM_T_PCH_CNP, WMP_F_COPPER }, 1684 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V11, 1685 "I219 V (11) Ethernet Connection", 1686 WM_T_PCH_CNP, WMP_F_COPPER }, 1687 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V12, 1688 "I219 V (12) Ethernet Connection", 1689 WM_T_PCH_SPT, WMP_F_COPPER }, 1690 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V13, 1691 "I219 V (13) Ethernet Connection", 1692 WM_T_PCH_CNP, WMP_F_COPPER }, 1693 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V14, 1694 "I219 V (14) Ethernet Connection", 1695 WM_T_PCH_CNP, WMP_F_COPPER }, 1696 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V15, 1697 "I219 V (15) Ethernet Connection", 1698 WM_T_PCH_CNP, WMP_F_COPPER }, 1699 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V16, 1700 "I219 V (16) Ethernet Connection", 1701 WM_T_PCH_CNP, WMP_F_COPPER }, 1702 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V17, 1703 "I219 V (17) Ethernet Connection", 1704 WM_T_PCH_CNP, WMP_F_COPPER }, 1705 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V18, 1706 "I219 V (18) Ethernet Connection", 1707 WM_T_PCH_CNP, WMP_F_COPPER }, 1708 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I219_V19, 1709 "I219 V (19) Ethernet Connection", 1710 WM_T_PCH_CNP, WMP_F_COPPER }, 1711 { 0, 0, 1712 NULL, 1713 0, 0 }, 1714 }; 1715 1716 /* 1717 * Register read/write functions. 1718 * Other than CSR_{READ|WRITE}(). 
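 *
 * Roughly, the wm_io_*() helpers below implement the I/O-mapped
 * indirect access scheme: the register offset is first written to
 * the address window at offset 0 of the I/O BAR, and the value is
 * then read or written through the data window at offset 4:
 *
 *	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
 *	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4);
 *
 * This path is not used for normal operation; it only exists to
 * work around bugs in a few chip versions.  Everything else goes
 * through the memory-mapped CSR_{READ|WRITE}() macros.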
1719 */ 1720 1721 #if 0 /* Not currently used */ 1722 static inline uint32_t 1723 wm_io_read(struct wm_softc *sc, int reg) 1724 { 1725 1726 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1727 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4)); 1728 } 1729 #endif 1730 1731 static inline void 1732 wm_io_write(struct wm_softc *sc, int reg, uint32_t val) 1733 { 1734 1735 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg); 1736 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val); 1737 } 1738 1739 static inline void 1740 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off, 1741 uint32_t data) 1742 { 1743 uint32_t regval; 1744 int i; 1745 1746 regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT); 1747 1748 CSR_WRITE(sc, reg, regval); 1749 1750 for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) { 1751 delay(5); 1752 if (CSR_READ(sc, reg) & SCTL_CTL_READY) 1753 break; 1754 } 1755 if (i == SCTL_CTL_POLL_TIMEOUT) { 1756 aprint_error("%s: WARNING:" 1757 " i82575 reg 0x%08x setup did not indicate ready\n", 1758 device_xname(sc->sc_dev), reg); 1759 } 1760 } 1761 1762 static inline void 1763 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v) 1764 { 1765 wa->wa_low = htole32(BUS_ADDR_LO32(v)); 1766 wa->wa_high = htole32(BUS_ADDR_HI32(v)); 1767 } 1768 1769 /* 1770 * Descriptor sync/init functions. 1771 */ 1772 static inline void 1773 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops) 1774 { 1775 struct wm_softc *sc = txq->txq_sc; 1776 1777 /* If it will wrap around, sync to the end of the ring. */ 1778 if ((start + num) > WM_NTXDESC(txq)) { 1779 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1780 WM_CDTXOFF(txq, start), txq->txq_descsize * 1781 (WM_NTXDESC(txq) - start), ops); 1782 num -= (WM_NTXDESC(txq) - start); 1783 start = 0; 1784 } 1785 1786 /* Now sync whatever is left. */ 1787 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap, 1788 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops); 1789 } 1790 1791 static inline void 1792 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops) 1793 { 1794 struct wm_softc *sc = rxq->rxq_sc; 1795 1796 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap, 1797 WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops); 1798 } 1799 1800 static inline void 1801 wm_init_rxdesc(struct wm_rxqueue *rxq, int start) 1802 { 1803 struct wm_softc *sc = rxq->rxq_sc; 1804 struct wm_rxsoft *rxs = &rxq->rxq_soft[start]; 1805 struct mbuf *m = rxs->rxs_mbuf; 1806 1807 /* 1808 * Note: We scoot the packet forward 2 bytes in the buffer 1809 * so that the payload after the Ethernet header is aligned 1810 * to a 4-byte boundary. 1811 1812 * XXX BRAINDAMAGE ALERT! 1813 * The stupid chip uses the same size for every buffer, which 1814 * is set in the Receive Control register. We are using the 2K 1815 * size option, but what we REALLY want is (2K - 2)! For this 1816 * reason, we can't "scoot" packets longer than the standard 1817 * Ethernet MTU. On strict-alignment platforms, if the total 1818 * size exceeds (2K - 2) we set align_tweak to 0 and let 1819 * the upper layer copy the headers. 
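	 *
	 * As a concrete example of the arithmetic above: with the 2K
	 * buffer option and a 2-byte tweak, only 2048 - 2 = 2046 bytes
	 * of each buffer are usable.  A standard 1518-byte frame still
	 * fits and gets scooted; once frames can exceed 2046 bytes,
	 * align_tweak is set to 0 and the headers arrive unaligned.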
1820 */ 1821 m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak; 1822 1823 if (sc->sc_type == WM_T_82574) { 1824 ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start]; 1825 rxd->erx_data.erxd_addr = 1826 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1827 rxd->erx_data.erxd_dd = 0; 1828 } else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 1829 nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start]; 1830 1831 rxd->nqrx_data.nrxd_paddr = 1832 htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1833 /* Currently, split header is not supported. */ 1834 rxd->nqrx_data.nrxd_haddr = 0; 1835 } else { 1836 wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start]; 1837 1838 wm_set_dma_addr(&rxd->wrx_addr, 1839 rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak); 1840 rxd->wrx_len = 0; 1841 rxd->wrx_cksum = 0; 1842 rxd->wrx_status = 0; 1843 rxd->wrx_errors = 0; 1844 rxd->wrx_special = 0; 1845 } 1846 wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1847 1848 CSR_WRITE(sc, rxq->rxq_rdt_reg, start); 1849 } 1850 1851 /* 1852 * Device driver interface functions and commonly used functions. 1853 * match, attach, detach, init, start, stop, ioctl, watchdog and so on. 1854 */ 1855 1856 /* Lookup supported device table */ 1857 static const struct wm_product * 1858 wm_lookup(const struct pci_attach_args *pa) 1859 { 1860 const struct wm_product *wmp; 1861 1862 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) { 1863 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor && 1864 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product) 1865 return wmp; 1866 } 1867 return NULL; 1868 } 1869 1870 /* The match function (ca_match) */ 1871 static int 1872 wm_match(device_t parent, cfdata_t cf, void *aux) 1873 { 1874 struct pci_attach_args *pa = aux; 1875 1876 if (wm_lookup(pa) != NULL) 1877 return 1; 1878 1879 return 0; 1880 } 1881 1882 /* The attach function (ca_attach) */ 1883 static void 1884 wm_attach(device_t parent, device_t self, void *aux) 1885 { 1886 struct wm_softc *sc = device_private(self); 1887 struct pci_attach_args *pa = aux; 1888 prop_dictionary_t dict; 1889 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1890 pci_chipset_tag_t pc = pa->pa_pc; 1891 int counts[PCI_INTR_TYPE_SIZE]; 1892 pci_intr_type_t max_type; 1893 const char *eetype, *xname; 1894 bus_space_tag_t memt; 1895 bus_space_handle_t memh; 1896 bus_size_t memsize; 1897 int memh_valid; 1898 int i, error; 1899 const struct wm_product *wmp; 1900 prop_data_t ea; 1901 prop_number_t pn; 1902 uint8_t enaddr[ETHER_ADDR_LEN]; 1903 char buf[256]; 1904 char wqname[MAXCOMLEN]; 1905 uint16_t cfg1, cfg2, swdpin, nvmword; 1906 pcireg_t preg, memtype; 1907 uint16_t eeprom_data, apme_mask; 1908 bool force_clear_smbi; 1909 uint32_t link_mode; 1910 uint32_t reg; 1911 1912 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT) 1913 sc->sc_debug = WM_DEBUG_DEFAULT; 1914 #endif 1915 sc->sc_dev = self; 1916 callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS); 1917 callout_setfunc(&sc->sc_tick_ch, wm_tick, sc); 1918 sc->sc_core_stopping = false; 1919 1920 wmp = wm_lookup(pa); 1921 #ifdef DIAGNOSTIC 1922 if (wmp == NULL) { 1923 printf("\n"); 1924 panic("wm_attach: impossible"); 1925 } 1926 #endif 1927 sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags); 1928 1929 sc->sc_pc = pa->pa_pc; 1930 sc->sc_pcitag = pa->pa_tag; 1931 1932 if (pci_dma64_available(pa)) { 1933 aprint_verbose(", 64-bit DMA"); 1934 sc->sc_dmat = pa->pa_dmat64; 1935 } else { 1936 aprint_verbose(", 32-bit DMA"); 1937 sc->sc_dmat = pa->pa_dmat; 1938 } 1939 1940 sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id); 1941 
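	/* The revision ID is the low byte of the PCI class/revision register. */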
	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;

	/* Set default function pointers */
	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
	sc->phy.release = sc->nvm.release = wm_put_null;
	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;

	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Disable MSI for Errata:
	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
	 *
	 * 82544: Errata 25
	 * 82540: Errata 6 (easy to reproduce device timeout)
	 * 82545: Errata 4 (easy to reproduce device timeout)
	 * 82546: Errata 26 (easy to reproduce device timeout)
	 * 82541: Errata 7 (easy to reproduce device timeout)
	 *
	 * "Byte Enables 2 and 3 are not set on MSI writes"
	 *
	 * 82571 & 82572: Errata 63
	 */
	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
	    || (sc->sc_type == WM_T_82572))
		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device. All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access. It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	switch (sc->sc_type) {
	case WM_T_82544:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that is not a problem, because those newer chips
			 * do not have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
2046 */ 2047 preg = pci_conf_read(pc, pa->pa_tag, i); 2048 if (PCI_MAPREG_MEM_ADDR(preg) == 0) { 2049 aprint_error_dev(sc->sc_dev, 2050 "WARNING: I/O BAR at zero.\n"); 2051 } else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO, 2052 0, &sc->sc_iot, &sc->sc_ioh, 2053 NULL, &sc->sc_ios) == 0) { 2054 sc->sc_flags |= WM_F_IOH_VALID; 2055 } else 2056 aprint_error_dev(sc->sc_dev, 2057 "WARNING: unable to map I/O space\n"); 2058 } 2059 break; 2060 default: 2061 break; 2062 } 2063 2064 /* Enable bus mastering. Disable MWI on the i82542 2.0. */ 2065 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 2066 preg |= PCI_COMMAND_MASTER_ENABLE; 2067 if (sc->sc_type < WM_T_82542_2_1) 2068 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE; 2069 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg); 2070 2071 /* Power up chip */ 2072 if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) 2073 && error != EOPNOTSUPP) { 2074 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 2075 return; 2076 } 2077 2078 wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag)); 2079 /* 2080 * Don't use MSI-X if we can use only one queue to save interrupt 2081 * resource. 2082 */ 2083 if (sc->sc_nqueues > 1) { 2084 max_type = PCI_INTR_TYPE_MSIX; 2085 /* 2086 * 82583 has a MSI-X capability in the PCI configuration space 2087 * but it doesn't support it. At least the document doesn't 2088 * say anything about MSI-X. 2089 */ 2090 counts[PCI_INTR_TYPE_MSIX] 2091 = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1; 2092 } else { 2093 max_type = PCI_INTR_TYPE_MSI; 2094 counts[PCI_INTR_TYPE_MSIX] = 0; 2095 } 2096 2097 /* Allocation settings */ 2098 counts[PCI_INTR_TYPE_MSI] = 1; 2099 counts[PCI_INTR_TYPE_INTX] = 1; 2100 /* overridden by disable flags */ 2101 if (wm_disable_msi != 0) { 2102 counts[PCI_INTR_TYPE_MSI] = 0; 2103 if (wm_disable_msix != 0) { 2104 max_type = PCI_INTR_TYPE_INTX; 2105 counts[PCI_INTR_TYPE_MSIX] = 0; 2106 } 2107 } else if (wm_disable_msix != 0) { 2108 max_type = PCI_INTR_TYPE_MSI; 2109 counts[PCI_INTR_TYPE_MSIX] = 0; 2110 } 2111 2112 alloc_retry: 2113 if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) { 2114 aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n"); 2115 return; 2116 } 2117 2118 if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) { 2119 error = wm_setup_msix(sc); 2120 if (error) { 2121 pci_intr_release(pc, sc->sc_intrs, 2122 counts[PCI_INTR_TYPE_MSIX]); 2123 2124 /* Setup for MSI: Disable MSI-X */ 2125 max_type = PCI_INTR_TYPE_MSI; 2126 counts[PCI_INTR_TYPE_MSI] = 1; 2127 counts[PCI_INTR_TYPE_INTX] = 1; 2128 goto alloc_retry; 2129 } 2130 } else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) { 2131 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */ 2132 error = wm_setup_legacy(sc); 2133 if (error) { 2134 pci_intr_release(sc->sc_pc, sc->sc_intrs, 2135 counts[PCI_INTR_TYPE_MSI]); 2136 2137 /* The next try is for INTx: Disable MSI */ 2138 max_type = PCI_INTR_TYPE_INTX; 2139 counts[PCI_INTR_TYPE_INTX] = 1; 2140 goto alloc_retry; 2141 } 2142 } else { 2143 wm_adjust_qnum(sc, 0); /* Must not use multiqueue */ 2144 error = wm_setup_legacy(sc); 2145 if (error) { 2146 pci_intr_release(sc->sc_pc, sc->sc_intrs, 2147 counts[PCI_INTR_TYPE_INTX]); 2148 return; 2149 } 2150 } 2151 2152 snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev)); 2153 error = workqueue_create(&sc->sc_queue_wq, wqname, 2154 wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET, 2155 WM_WORKQUEUE_FLAGS); 2156 if (error) { 2157 aprint_error_dev(sc->sc_dev, 
2158 "unable to create workqueue\n"); 2159 goto out; 2160 } 2161 2162 /* 2163 * Check the function ID (unit number of the chip). 2164 */ 2165 if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3) 2166 || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003) 2167 || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2168 || (sc->sc_type == WM_T_82580) 2169 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 2170 sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS) 2171 >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK; 2172 else 2173 sc->sc_funcid = 0; 2174 2175 /* 2176 * Determine a few things about the bus we're connected to. 2177 */ 2178 if (sc->sc_type < WM_T_82543) { 2179 /* We don't really know the bus characteristics here. */ 2180 sc->sc_bus_speed = 33; 2181 } else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) { 2182 /* 2183 * CSA (Communication Streaming Architecture) is about as fast 2184 * a 32-bit 66MHz PCI Bus. 2185 */ 2186 sc->sc_flags |= WM_F_CSA; 2187 sc->sc_bus_speed = 66; 2188 aprint_verbose_dev(sc->sc_dev, 2189 "Communication Streaming Architecture\n"); 2190 if (sc->sc_type == WM_T_82547) { 2191 callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS); 2192 callout_setfunc(&sc->sc_txfifo_ch, 2193 wm_82547_txfifo_stall, sc); 2194 aprint_verbose_dev(sc->sc_dev, 2195 "using 82547 Tx FIFO stall work-around\n"); 2196 } 2197 } else if (sc->sc_type >= WM_T_82571) { 2198 sc->sc_flags |= WM_F_PCIE; 2199 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 2200 && (sc->sc_type != WM_T_ICH10) 2201 && (sc->sc_type != WM_T_PCH) 2202 && (sc->sc_type != WM_T_PCH2) 2203 && (sc->sc_type != WM_T_PCH_LPT) 2204 && (sc->sc_type != WM_T_PCH_SPT) 2205 && (sc->sc_type != WM_T_PCH_CNP)) { 2206 /* ICH* and PCH* have no PCIe capability registers */ 2207 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 2208 PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff, 2209 NULL) == 0) 2210 aprint_error_dev(sc->sc_dev, 2211 "unable to find PCIe capability\n"); 2212 } 2213 aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n"); 2214 } else { 2215 reg = CSR_READ(sc, WMREG_STATUS); 2216 if (reg & STATUS_BUS64) 2217 sc->sc_flags |= WM_F_BUS64; 2218 if ((reg & STATUS_PCIX_MODE) != 0) { 2219 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb; 2220 2221 sc->sc_flags |= WM_F_PCIX; 2222 if (pci_get_capability(pa->pa_pc, pa->pa_tag, 2223 PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0) 2224 aprint_error_dev(sc->sc_dev, 2225 "unable to find PCIX capability\n"); 2226 else if (sc->sc_type != WM_T_82545_3 && 2227 sc->sc_type != WM_T_82546_3) { 2228 /* 2229 * Work around a problem caused by the BIOS 2230 * setting the max memory read byte count 2231 * incorrectly. 2232 */ 2233 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, 2234 sc->sc_pcixe_capoff + PCIX_CMD); 2235 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag, 2236 sc->sc_pcixe_capoff + PCIX_STATUS); 2237 2238 bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >> 2239 PCIX_CMD_BYTECNT_SHIFT; 2240 maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >> 2241 PCIX_STATUS_MAXB_SHIFT; 2242 if (bytecnt > maxb) { 2243 aprint_verbose_dev(sc->sc_dev, 2244 "resetting PCI-X MMRBC: %d -> %d\n", 2245 512 << bytecnt, 512 << maxb); 2246 pcix_cmd = (pcix_cmd & 2247 ~PCIX_CMD_BYTECNT_MASK) | 2248 (maxb << PCIX_CMD_BYTECNT_SHIFT); 2249 pci_conf_write(pa->pa_pc, pa->pa_tag, 2250 sc->sc_pcixe_capoff + PCIX_CMD, 2251 pcix_cmd); 2252 } 2253 } 2254 } 2255 /* 2256 * The quad port adapter is special; it has a PCIX-PCIX 2257 * bridge on the board, and can run the secondary bus at 2258 * a higher speed. 
2259 */ 2260 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) { 2261 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120 2262 : 66; 2263 } else if (sc->sc_flags & WM_F_PCIX) { 2264 switch (reg & STATUS_PCIXSPD_MASK) { 2265 case STATUS_PCIXSPD_50_66: 2266 sc->sc_bus_speed = 66; 2267 break; 2268 case STATUS_PCIXSPD_66_100: 2269 sc->sc_bus_speed = 100; 2270 break; 2271 case STATUS_PCIXSPD_100_133: 2272 sc->sc_bus_speed = 133; 2273 break; 2274 default: 2275 aprint_error_dev(sc->sc_dev, 2276 "unknown PCIXSPD %d; assuming 66MHz\n", 2277 reg & STATUS_PCIXSPD_MASK); 2278 sc->sc_bus_speed = 66; 2279 break; 2280 } 2281 } else 2282 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33; 2283 aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n", 2284 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed, 2285 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI"); 2286 } 2287 2288 /* clear interesting stat counters */ 2289 CSR_READ(sc, WMREG_COLC); 2290 CSR_READ(sc, WMREG_RXERRC); 2291 2292 if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583) 2293 || (sc->sc_type >= WM_T_ICH8)) 2294 sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2295 if (sc->sc_type >= WM_T_ICH8) 2296 sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2297 2298 /* Set PHY, NVM mutex related stuff */ 2299 switch (sc->sc_type) { 2300 case WM_T_82542_2_0: 2301 case WM_T_82542_2_1: 2302 case WM_T_82543: 2303 case WM_T_82544: 2304 /* Microwire */ 2305 sc->nvm.read = wm_nvm_read_uwire; 2306 sc->sc_nvm_wordsize = 64; 2307 sc->sc_nvm_addrbits = 6; 2308 break; 2309 case WM_T_82540: 2310 case WM_T_82545: 2311 case WM_T_82545_3: 2312 case WM_T_82546: 2313 case WM_T_82546_3: 2314 /* Microwire */ 2315 sc->nvm.read = wm_nvm_read_uwire; 2316 reg = CSR_READ(sc, WMREG_EECD); 2317 if (reg & EECD_EE_SIZE) { 2318 sc->sc_nvm_wordsize = 256; 2319 sc->sc_nvm_addrbits = 8; 2320 } else { 2321 sc->sc_nvm_wordsize = 64; 2322 sc->sc_nvm_addrbits = 6; 2323 } 2324 sc->sc_flags |= WM_F_LOCK_EECD; 2325 sc->nvm.acquire = wm_get_eecd; 2326 sc->nvm.release = wm_put_eecd; 2327 break; 2328 case WM_T_82541: 2329 case WM_T_82541_2: 2330 case WM_T_82547: 2331 case WM_T_82547_2: 2332 reg = CSR_READ(sc, WMREG_EECD); 2333 /* 2334 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only 2335 * on 8254[17], so set flags and functios before calling it. 
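		 *
		 * For reference, EECD_EE_TYPE distinguishes an SPI part from
		 * a Microwire part here, and for Microwire the EECD_EE_ABITS
		 * bit selects 8 address bits (256 words) versus 6 address
		 * bits (64 words), which is what the code below switches on.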
2336 */ 2337 sc->sc_flags |= WM_F_LOCK_EECD; 2338 sc->nvm.acquire = wm_get_eecd; 2339 sc->nvm.release = wm_put_eecd; 2340 if (reg & EECD_EE_TYPE) { 2341 /* SPI */ 2342 sc->nvm.read = wm_nvm_read_spi; 2343 sc->sc_flags |= WM_F_EEPROM_SPI; 2344 wm_nvm_set_addrbits_size_eecd(sc); 2345 } else { 2346 /* Microwire */ 2347 sc->nvm.read = wm_nvm_read_uwire; 2348 if ((reg & EECD_EE_ABITS) != 0) { 2349 sc->sc_nvm_wordsize = 256; 2350 sc->sc_nvm_addrbits = 8; 2351 } else { 2352 sc->sc_nvm_wordsize = 64; 2353 sc->sc_nvm_addrbits = 6; 2354 } 2355 } 2356 break; 2357 case WM_T_82571: 2358 case WM_T_82572: 2359 /* SPI */ 2360 sc->nvm.read = wm_nvm_read_eerd; 2361 /* Not use WM_F_LOCK_EECD because we use EERD */ 2362 sc->sc_flags |= WM_F_EEPROM_SPI; 2363 wm_nvm_set_addrbits_size_eecd(sc); 2364 sc->phy.acquire = wm_get_swsm_semaphore; 2365 sc->phy.release = wm_put_swsm_semaphore; 2366 sc->nvm.acquire = wm_get_nvm_82571; 2367 sc->nvm.release = wm_put_nvm_82571; 2368 break; 2369 case WM_T_82573: 2370 case WM_T_82574: 2371 case WM_T_82583: 2372 sc->nvm.read = wm_nvm_read_eerd; 2373 /* Not use WM_F_LOCK_EECD because we use EERD */ 2374 if (sc->sc_type == WM_T_82573) { 2375 sc->phy.acquire = wm_get_swsm_semaphore; 2376 sc->phy.release = wm_put_swsm_semaphore; 2377 sc->nvm.acquire = wm_get_nvm_82571; 2378 sc->nvm.release = wm_put_nvm_82571; 2379 } else { 2380 /* Both PHY and NVM use the same semaphore. */ 2381 sc->phy.acquire = sc->nvm.acquire 2382 = wm_get_swfwhw_semaphore; 2383 sc->phy.release = sc->nvm.release 2384 = wm_put_swfwhw_semaphore; 2385 } 2386 if (wm_nvm_is_onboard_eeprom(sc) == 0) { 2387 sc->sc_flags |= WM_F_EEPROM_FLASH; 2388 sc->sc_nvm_wordsize = 2048; 2389 } else { 2390 /* SPI */ 2391 sc->sc_flags |= WM_F_EEPROM_SPI; 2392 wm_nvm_set_addrbits_size_eecd(sc); 2393 } 2394 break; 2395 case WM_T_82575: 2396 case WM_T_82576: 2397 case WM_T_82580: 2398 case WM_T_I350: 2399 case WM_T_I354: 2400 case WM_T_80003: 2401 /* SPI */ 2402 sc->sc_flags |= WM_F_EEPROM_SPI; 2403 wm_nvm_set_addrbits_size_eecd(sc); 2404 if ((sc->sc_type == WM_T_80003) 2405 || (sc->sc_nvm_wordsize < (1 << 15))) { 2406 sc->nvm.read = wm_nvm_read_eerd; 2407 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2408 } else { 2409 sc->nvm.read = wm_nvm_read_spi; 2410 sc->sc_flags |= WM_F_LOCK_EECD; 2411 } 2412 sc->phy.acquire = wm_get_phy_82575; 2413 sc->phy.release = wm_put_phy_82575; 2414 sc->nvm.acquire = wm_get_nvm_80003; 2415 sc->nvm.release = wm_put_nvm_80003; 2416 break; 2417 case WM_T_ICH8: 2418 case WM_T_ICH9: 2419 case WM_T_ICH10: 2420 case WM_T_PCH: 2421 case WM_T_PCH2: 2422 case WM_T_PCH_LPT: 2423 sc->nvm.read = wm_nvm_read_ich8; 2424 /* FLASH */ 2425 sc->sc_flags |= WM_F_EEPROM_FLASH; 2426 sc->sc_nvm_wordsize = 2048; 2427 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH); 2428 if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0, 2429 &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) { 2430 aprint_error_dev(sc->sc_dev, 2431 "can't map FLASH registers\n"); 2432 goto out; 2433 } 2434 reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG); 2435 sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) * 2436 ICH_FLASH_SECTOR_SIZE; 2437 sc->sc_ich8_flash_bank_size = 2438 ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1; 2439 sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK); 2440 sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 2441 sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t); 2442 sc->sc_flashreg_offset = 0; 2443 sc->phy.acquire = wm_get_swflag_ich8lan; 2444 sc->phy.release = wm_put_swflag_ich8lan; 2445 
sc->nvm.acquire = wm_get_nvm_ich8lan; 2446 sc->nvm.release = wm_put_nvm_ich8lan; 2447 break; 2448 case WM_T_PCH_SPT: 2449 case WM_T_PCH_CNP: 2450 sc->nvm.read = wm_nvm_read_spt; 2451 /* SPT has no GFPREG; flash registers mapped through BAR0 */ 2452 sc->sc_flags |= WM_F_EEPROM_FLASH; 2453 sc->sc_flasht = sc->sc_st; 2454 sc->sc_flashh = sc->sc_sh; 2455 sc->sc_ich8_flash_base = 0; 2456 sc->sc_nvm_wordsize = 2457 (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1) 2458 * NVM_SIZE_MULTIPLIER; 2459 /* It is size in bytes, we want words */ 2460 sc->sc_nvm_wordsize /= 2; 2461 /* Assume 2 banks */ 2462 sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2; 2463 sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET; 2464 sc->phy.acquire = wm_get_swflag_ich8lan; 2465 sc->phy.release = wm_put_swflag_ich8lan; 2466 sc->nvm.acquire = wm_get_nvm_ich8lan; 2467 sc->nvm.release = wm_put_nvm_ich8lan; 2468 break; 2469 case WM_T_I210: 2470 case WM_T_I211: 2471 /* Allow a single clear of the SW semaphore on I210 and newer*/ 2472 sc->sc_flags |= WM_F_WA_I210_CLSEM; 2473 if (wm_nvm_flash_presence_i210(sc)) { 2474 sc->nvm.read = wm_nvm_read_eerd; 2475 /* Don't use WM_F_LOCK_EECD because we use EERD */ 2476 sc->sc_flags |= WM_F_EEPROM_FLASH_HW; 2477 wm_nvm_set_addrbits_size_eecd(sc); 2478 } else { 2479 sc->nvm.read = wm_nvm_read_invm; 2480 sc->sc_flags |= WM_F_EEPROM_INVM; 2481 sc->sc_nvm_wordsize = INVM_SIZE; 2482 } 2483 sc->phy.acquire = wm_get_phy_82575; 2484 sc->phy.release = wm_put_phy_82575; 2485 sc->nvm.acquire = wm_get_nvm_80003; 2486 sc->nvm.release = wm_put_nvm_80003; 2487 break; 2488 default: 2489 break; 2490 } 2491 2492 /* Ensure the SMBI bit is clear before first NVM or PHY access */ 2493 switch (sc->sc_type) { 2494 case WM_T_82571: 2495 case WM_T_82572: 2496 reg = CSR_READ(sc, WMREG_SWSM2); 2497 if ((reg & SWSM2_LOCK) == 0) { 2498 CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK); 2499 force_clear_smbi = true; 2500 } else 2501 force_clear_smbi = false; 2502 break; 2503 case WM_T_82573: 2504 case WM_T_82574: 2505 case WM_T_82583: 2506 force_clear_smbi = true; 2507 break; 2508 default: 2509 force_clear_smbi = false; 2510 break; 2511 } 2512 if (force_clear_smbi) { 2513 reg = CSR_READ(sc, WMREG_SWSM); 2514 if ((reg & SWSM_SMBI) != 0) 2515 aprint_error_dev(sc->sc_dev, 2516 "Please update the Bootagent\n"); 2517 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI); 2518 } 2519 2520 /* 2521 * Defer printing the EEPROM type until after verifying the checksum 2522 * This allows the EEPROM type to be printed correctly in the case 2523 * that no EEPROM is attached. 2524 */ 2525 /* 2526 * Validate the EEPROM checksum. If the checksum fails, flag 2527 * this for later, so we can fail future reads from the EEPROM. 2528 */ 2529 if (wm_nvm_validate_checksum(sc)) { 2530 /* 2531 * Read twice again because some PCI-e parts fail the 2532 * first check due to the link being in sleep state. 
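		 *
		 * Roughly, the validation just sums the NVM words and
		 * compares the result against a fixed signature word, so a
		 * single garbled read while the link is still waking up is
		 * enough to fail the first pass.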
2533 */ 2534 if (wm_nvm_validate_checksum(sc)) 2535 sc->sc_flags |= WM_F_EEPROM_INVALID; 2536 } 2537 2538 if (sc->sc_flags & WM_F_EEPROM_INVALID) 2539 aprint_verbose_dev(sc->sc_dev, "No EEPROM"); 2540 else { 2541 aprint_verbose_dev(sc->sc_dev, "%u words ", 2542 sc->sc_nvm_wordsize); 2543 if (sc->sc_flags & WM_F_EEPROM_INVM) 2544 aprint_verbose("iNVM"); 2545 else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) 2546 aprint_verbose("FLASH(HW)"); 2547 else if (sc->sc_flags & WM_F_EEPROM_FLASH) 2548 aprint_verbose("FLASH"); 2549 else { 2550 if (sc->sc_flags & WM_F_EEPROM_SPI) 2551 eetype = "SPI"; 2552 else 2553 eetype = "MicroWire"; 2554 aprint_verbose("(%d address bits) %s EEPROM", 2555 sc->sc_nvm_addrbits, eetype); 2556 } 2557 } 2558 wm_nvm_version(sc); 2559 aprint_verbose("\n"); 2560 2561 /* 2562 * XXX The first call of wm_gmii_setup_phytype. The result might be 2563 * incorrect. 2564 */ 2565 wm_gmii_setup_phytype(sc, 0, 0); 2566 2567 /* Check for WM_F_WOL on some chips before wm_reset() */ 2568 switch (sc->sc_type) { 2569 case WM_T_ICH8: 2570 case WM_T_ICH9: 2571 case WM_T_ICH10: 2572 case WM_T_PCH: 2573 case WM_T_PCH2: 2574 case WM_T_PCH_LPT: 2575 case WM_T_PCH_SPT: 2576 case WM_T_PCH_CNP: 2577 apme_mask = WUC_APME; 2578 eeprom_data = CSR_READ(sc, WMREG_WUC); 2579 if ((eeprom_data & apme_mask) != 0) 2580 sc->sc_flags |= WM_F_WOL; 2581 break; 2582 default: 2583 break; 2584 } 2585 2586 /* Reset the chip to a known state. */ 2587 wm_reset(sc); 2588 2589 /* 2590 * Check for I21[01] PLL workaround. 2591 * 2592 * Three cases: 2593 * a) Chip is I211. 2594 * b) Chip is I210 and it uses INVM (not FLASH). 2595 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25 2596 */ 2597 if (sc->sc_type == WM_T_I211) 2598 sc->sc_flags |= WM_F_PLL_WA_I210; 2599 if (sc->sc_type == WM_T_I210) { 2600 if (!wm_nvm_flash_presence_i210(sc)) 2601 sc->sc_flags |= WM_F_PLL_WA_I210; 2602 else if ((sc->sc_nvm_ver_major < 3) 2603 || ((sc->sc_nvm_ver_major == 3) 2604 && (sc->sc_nvm_ver_minor < 25))) { 2605 aprint_verbose_dev(sc->sc_dev, 2606 "ROM image version %d.%d is older than 3.25\n", 2607 sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor); 2608 sc->sc_flags |= WM_F_PLL_WA_I210; 2609 } 2610 } 2611 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 2612 wm_pll_workaround_i210(sc); 2613 2614 wm_get_wakeup(sc); 2615 2616 /* Non-AMT based hardware can now take control from firmware */ 2617 if ((sc->sc_flags & WM_F_HAS_AMT) == 0) 2618 wm_get_hw_control(sc); 2619 2620 /* 2621 * Read the Ethernet address from the EEPROM, if not first found 2622 * in device properties. 2623 */ 2624 ea = prop_dictionary_get(dict, "mac-address"); 2625 if (ea != NULL) { 2626 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 2627 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 2628 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN); 2629 } else { 2630 if (wm_read_mac_addr(sc, enaddr) != 0) { 2631 aprint_error_dev(sc->sc_dev, 2632 "unable to read Ethernet address\n"); 2633 goto out; 2634 } 2635 } 2636 2637 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 2638 ether_sprintf(enaddr)); 2639 2640 /* 2641 * Read the config info from the EEPROM, and set up various 2642 * bits in the control registers based on their contents. 
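	 *
	 * Specifically the CFG1 and CFG2 words and, on the i82544 and
	 * later, the SWDPIN word; each of them may also be supplied via
	 * a device property ("i82543-cfg1" and friends) instead.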
2643 */ 2644 pn = prop_dictionary_get(dict, "i82543-cfg1"); 2645 if (pn != NULL) { 2646 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2647 cfg1 = (uint16_t) prop_number_signed_value(pn); 2648 } else { 2649 if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) { 2650 aprint_error_dev(sc->sc_dev, "unable to read CFG1\n"); 2651 goto out; 2652 } 2653 } 2654 2655 pn = prop_dictionary_get(dict, "i82543-cfg2"); 2656 if (pn != NULL) { 2657 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2658 cfg2 = (uint16_t) prop_number_signed_value(pn); 2659 } else { 2660 if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) { 2661 aprint_error_dev(sc->sc_dev, "unable to read CFG2\n"); 2662 goto out; 2663 } 2664 } 2665 2666 /* check for WM_F_WOL */ 2667 switch (sc->sc_type) { 2668 case WM_T_82542_2_0: 2669 case WM_T_82542_2_1: 2670 case WM_T_82543: 2671 /* dummy? */ 2672 eeprom_data = 0; 2673 apme_mask = NVM_CFG3_APME; 2674 break; 2675 case WM_T_82544: 2676 apme_mask = NVM_CFG2_82544_APM_EN; 2677 eeprom_data = cfg2; 2678 break; 2679 case WM_T_82546: 2680 case WM_T_82546_3: 2681 case WM_T_82571: 2682 case WM_T_82572: 2683 case WM_T_82573: 2684 case WM_T_82574: 2685 case WM_T_82583: 2686 case WM_T_80003: 2687 case WM_T_82575: 2688 case WM_T_82576: 2689 apme_mask = NVM_CFG3_APME; 2690 wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB 2691 : NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2692 break; 2693 case WM_T_82580: 2694 case WM_T_I350: 2695 case WM_T_I354: 2696 case WM_T_I210: 2697 case WM_T_I211: 2698 apme_mask = NVM_CFG3_APME; 2699 wm_nvm_read(sc, 2700 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA, 2701 1, &eeprom_data); 2702 break; 2703 case WM_T_ICH8: 2704 case WM_T_ICH9: 2705 case WM_T_ICH10: 2706 case WM_T_PCH: 2707 case WM_T_PCH2: 2708 case WM_T_PCH_LPT: 2709 case WM_T_PCH_SPT: 2710 case WM_T_PCH_CNP: 2711 /* Already checked before wm_reset () */ 2712 apme_mask = eeprom_data = 0; 2713 break; 2714 default: /* XXX 82540 */ 2715 apme_mask = NVM_CFG3_APME; 2716 wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data); 2717 break; 2718 } 2719 /* Check for WM_F_WOL flag after the setting of the EEPROM stuff */ 2720 if ((eeprom_data & apme_mask) != 0) 2721 sc->sc_flags |= WM_F_WOL; 2722 2723 /* 2724 * We have the eeprom settings, now apply the special cases 2725 * where the eeprom may be wrong or the board won't support 2726 * wake on lan on a particular port 2727 */ 2728 switch (sc->sc_pcidevid) { 2729 case PCI_PRODUCT_INTEL_82546GB_PCIE: 2730 sc->sc_flags &= ~WM_F_WOL; 2731 break; 2732 case PCI_PRODUCT_INTEL_82546EB_FIBER: 2733 case PCI_PRODUCT_INTEL_82546GB_FIBER: 2734 /* Wake events only supported on port A for dual fiber 2735 * regardless of eeprom setting */ 2736 if (sc->sc_funcid == 1) 2737 sc->sc_flags &= ~WM_F_WOL; 2738 break; 2739 case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3: 2740 /* If quad port adapter, disable WoL on all but port A */ 2741 if (sc->sc_funcid != 0) 2742 sc->sc_flags &= ~WM_F_WOL; 2743 break; 2744 case PCI_PRODUCT_INTEL_82571EB_FIBER: 2745 /* Wake events only supported on port A for dual fiber 2746 * regardless of eeprom setting */ 2747 if (sc->sc_funcid == 1) 2748 sc->sc_flags &= ~WM_F_WOL; 2749 break; 2750 case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER: 2751 case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER: 2752 case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER: 2753 /* If quad port adapter, disable WoL on all but port A */ 2754 if (sc->sc_funcid != 0) 2755 sc->sc_flags &= ~WM_F_WOL; 2756 break; 2757 } 2758 2759 if (sc->sc_type >= WM_T_82575) { 2760 if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) 
== 0) { 2761 aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n", 2762 nvmword); 2763 if ((sc->sc_type == WM_T_82575) || 2764 (sc->sc_type == WM_T_82576)) { 2765 /* Check NVM for autonegotiation */ 2766 if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) 2767 != 0) 2768 sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO; 2769 } 2770 if ((sc->sc_type == WM_T_82575) || 2771 (sc->sc_type == WM_T_I350)) { 2772 if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid)) 2773 sc->sc_flags |= WM_F_MAS; 2774 } 2775 } 2776 } 2777 2778 /* 2779 * XXX need special handling for some multiple port cards 2780 * to disable a paticular port. 2781 */ 2782 2783 if (sc->sc_type >= WM_T_82544) { 2784 pn = prop_dictionary_get(dict, "i82543-swdpin"); 2785 if (pn != NULL) { 2786 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER); 2787 swdpin = (uint16_t) prop_number_signed_value(pn); 2788 } else { 2789 if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) { 2790 aprint_error_dev(sc->sc_dev, 2791 "unable to read SWDPIN\n"); 2792 goto out; 2793 } 2794 } 2795 } 2796 2797 if (cfg1 & NVM_CFG1_ILOS) 2798 sc->sc_ctrl |= CTRL_ILOS; 2799 2800 /* 2801 * XXX 2802 * This code isn't correct because pin 2 and 3 are located 2803 * in different position on newer chips. Check all datasheet. 2804 * 2805 * Until resolve this problem, check if a chip < 82580 2806 */ 2807 if (sc->sc_type <= WM_T_82580) { 2808 if (sc->sc_type >= WM_T_82544) { 2809 sc->sc_ctrl |= 2810 ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) << 2811 CTRL_SWDPIO_SHIFT; 2812 sc->sc_ctrl |= 2813 ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) << 2814 CTRL_SWDPINS_SHIFT; 2815 } else { 2816 sc->sc_ctrl |= 2817 ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) << 2818 CTRL_SWDPIO_SHIFT; 2819 } 2820 } 2821 2822 if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) { 2823 wm_nvm_read(sc, 2824 NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA, 2825 1, &nvmword); 2826 if (nvmword & NVM_CFG3_ILOS) 2827 sc->sc_ctrl |= CTRL_ILOS; 2828 } 2829 2830 #if 0 2831 if (sc->sc_type >= WM_T_82544) { 2832 if (cfg1 & NVM_CFG1_IPS0) 2833 sc->sc_ctrl_ext |= CTRL_EXT_IPS; 2834 if (cfg1 & NVM_CFG1_IPS1) 2835 sc->sc_ctrl_ext |= CTRL_EXT_IPS1; 2836 sc->sc_ctrl_ext |= 2837 ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) << 2838 CTRL_EXT_SWDPIO_SHIFT; 2839 sc->sc_ctrl_ext |= 2840 ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) << 2841 CTRL_EXT_SWDPINS_SHIFT; 2842 } else { 2843 sc->sc_ctrl_ext |= 2844 ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) << 2845 CTRL_EXT_SWDPIO_SHIFT; 2846 } 2847 #endif 2848 2849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 2850 #if 0 2851 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 2852 #endif 2853 2854 if (sc->sc_type == WM_T_PCH) { 2855 uint16_t val; 2856 2857 /* Save the NVM K1 bit setting */ 2858 wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val); 2859 2860 if ((val & NVM_K1_CONFIG_ENABLE) != 0) 2861 sc->sc_nvm_k1_enabled = 1; 2862 else 2863 sc->sc_nvm_k1_enabled = 0; 2864 } 2865 2866 /* Determine if we're GMII, TBI, SERDES or SGMII mode */ 2867 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9 2868 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH 2869 || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT 2870 || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP 2871 || sc->sc_type == WM_T_82573 2872 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) { 2873 /* Copper only */ 2874 } else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 2875 || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350) 2876 || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210) 2877 || 
(sc->sc_type ==WM_T_I211)) { 2878 reg = CSR_READ(sc, WMREG_CTRL_EXT); 2879 link_mode = reg & CTRL_EXT_LINK_MODE_MASK; 2880 switch (link_mode) { 2881 case CTRL_EXT_LINK_MODE_1000KX: 2882 aprint_normal_dev(sc->sc_dev, "1000KX\n"); 2883 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2884 break; 2885 case CTRL_EXT_LINK_MODE_SGMII: 2886 if (wm_sgmii_uses_mdio(sc)) { 2887 aprint_normal_dev(sc->sc_dev, 2888 "SGMII(MDIO)\n"); 2889 sc->sc_flags |= WM_F_SGMII; 2890 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2891 break; 2892 } 2893 aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n"); 2894 /*FALLTHROUGH*/ 2895 case CTRL_EXT_LINK_MODE_PCIE_SERDES: 2896 sc->sc_mediatype = wm_sfp_get_media_type(sc); 2897 if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) { 2898 if (link_mode 2899 == CTRL_EXT_LINK_MODE_SGMII) { 2900 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2901 sc->sc_flags |= WM_F_SGMII; 2902 aprint_verbose_dev(sc->sc_dev, 2903 "SGMII\n"); 2904 } else { 2905 sc->sc_mediatype = WM_MEDIATYPE_SERDES; 2906 aprint_verbose_dev(sc->sc_dev, 2907 "SERDES\n"); 2908 } 2909 break; 2910 } 2911 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) 2912 aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n"); 2913 else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { 2914 aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n"); 2915 sc->sc_flags |= WM_F_SGMII; 2916 } 2917 /* Do not change link mode for 100BaseFX */ 2918 if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX) 2919 break; 2920 2921 /* Change current link mode setting */ 2922 reg &= ~CTRL_EXT_LINK_MODE_MASK; 2923 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 2924 reg |= CTRL_EXT_LINK_MODE_SGMII; 2925 else 2926 reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES; 2927 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2928 break; 2929 case CTRL_EXT_LINK_MODE_GMII: 2930 default: 2931 aprint_normal_dev(sc->sc_dev, "Copper\n"); 2932 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2933 break; 2934 } 2935 2936 reg &= ~CTRL_EXT_I2C_ENA; 2937 if ((sc->sc_flags & WM_F_SGMII) != 0) 2938 reg |= CTRL_EXT_I2C_ENA; 2939 else 2940 reg &= ~CTRL_EXT_I2C_ENA; 2941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 2942 if ((sc->sc_flags & WM_F_SGMII) != 0) { 2943 if (!wm_sgmii_uses_mdio(sc)) 2944 wm_gmii_setup_phytype(sc, 0, 0); 2945 wm_reset_mdicnfg_82580(sc); 2946 } 2947 } else if (sc->sc_type < WM_T_82543 || 2948 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) { 2949 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) { 2950 aprint_error_dev(sc->sc_dev, 2951 "WARNING: TBIMODE set on 1000BASE-T product!\n"); 2952 sc->sc_mediatype = WM_MEDIATYPE_FIBER; 2953 } 2954 } else { 2955 if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) { 2956 aprint_error_dev(sc->sc_dev, 2957 "WARNING: TBIMODE clear on 1000BASE-X product!\n"); 2958 sc->sc_mediatype = WM_MEDIATYPE_COPPER; 2959 } 2960 } 2961 2962 if (sc->sc_type >= WM_T_PCH2) 2963 sc->sc_flags |= WM_F_EEE; 2964 else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211) 2965 && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) { 2966 /* XXX: Need special handling for I354. (not yet) */ 2967 if (sc->sc_type != WM_T_I354) 2968 sc->sc_flags |= WM_F_EEE; 2969 } 2970 2971 /* 2972 * The I350 has a bug where it always strips the CRC whether 2973 * asked to or not. 
So ask for stripped CRC here and cope in rxeof 2974 */ 2975 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 2976 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) 2977 sc->sc_flags |= WM_F_CRC_STRIP; 2978 2979 /* Set device properties (macflags) */ 2980 prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags); 2981 2982 if (sc->sc_flags != 0) { 2983 snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags); 2984 aprint_verbose_dev(sc->sc_dev, "%s\n", buf); 2985 } 2986 2987 #ifdef WM_MPSAFE 2988 sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 2989 #else 2990 sc->sc_core_lock = NULL; 2991 #endif 2992 2993 /* Initialize the media structures accordingly. */ 2994 if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) 2995 wm_gmii_mediainit(sc, wmp->wmp_product); 2996 else 2997 wm_tbi_mediainit(sc); /* All others */ 2998 2999 ifp = &sc->sc_ethercom.ec_if; 3000 xname = device_xname(sc->sc_dev); 3001 strlcpy(ifp->if_xname, xname, IFNAMSIZ); 3002 ifp->if_softc = sc; 3003 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 3004 #ifdef WM_MPSAFE 3005 ifp->if_extflags = IFEF_MPSAFE; 3006 #endif 3007 ifp->if_ioctl = wm_ioctl; 3008 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 3009 ifp->if_start = wm_nq_start; 3010 /* 3011 * When the number of CPUs is one and the controller can use 3012 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue. 3013 * That is, wm(4) use two interrupts, one is used for Tx/Rx 3014 * and the other is used for link status changing. 3015 * In this situation, wm_nq_transmit() is disadvantageous 3016 * because of wm_select_txqueue() and pcq(9) overhead. 3017 */ 3018 if (wm_is_using_multiqueue(sc)) 3019 ifp->if_transmit = wm_nq_transmit; 3020 } else { 3021 ifp->if_start = wm_start; 3022 /* 3023 * wm_transmit() has the same disadvantage as wm_transmit(). 3024 */ 3025 if (wm_is_using_multiqueue(sc)) 3026 ifp->if_transmit = wm_transmit; 3027 } 3028 /* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */ 3029 ifp->if_init = wm_init; 3030 ifp->if_stop = wm_stop; 3031 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN)); 3032 IFQ_SET_READY(&ifp->if_snd); 3033 3034 /* Check for jumbo frame */ 3035 switch (sc->sc_type) { 3036 case WM_T_82573: 3037 /* XXX limited to 9234 if ASPM is disabled */ 3038 wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword); 3039 if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0) 3040 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3041 break; 3042 case WM_T_82571: 3043 case WM_T_82572: 3044 case WM_T_82574: 3045 case WM_T_82583: 3046 case WM_T_82575: 3047 case WM_T_82576: 3048 case WM_T_82580: 3049 case WM_T_I350: 3050 case WM_T_I354: 3051 case WM_T_I210: 3052 case WM_T_I211: 3053 case WM_T_80003: 3054 case WM_T_ICH9: 3055 case WM_T_ICH10: 3056 case WM_T_PCH2: /* PCH2 supports 9K frame size */ 3057 case WM_T_PCH_LPT: 3058 case WM_T_PCH_SPT: 3059 case WM_T_PCH_CNP: 3060 /* XXX limited to 9234 */ 3061 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3062 break; 3063 case WM_T_PCH: 3064 /* XXX limited to 4096 */ 3065 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3066 break; 3067 case WM_T_82542_2_0: 3068 case WM_T_82542_2_1: 3069 case WM_T_ICH8: 3070 /* No support for jumbo frame */ 3071 break; 3072 default: 3073 /* ETHER_MAX_LEN_JUMBO */ 3074 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU; 3075 break; 3076 } 3077 3078 /* If we're a i82543 or greater, we can support VLANs. 
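	 * Both ETHERCAP_VLAN_MTU and hardware VLAN tagging are advertised
	 * below, and hardware tagging is also enabled by default.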
*/ 3079 if (sc->sc_type >= WM_T_82543) { 3080 sc->sc_ethercom.ec_capabilities |= 3081 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 3082 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 3083 } 3084 3085 if ((sc->sc_flags & WM_F_EEE) != 0) 3086 sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE; 3087 3088 /* 3089 * We can perform TCPv4 and UDPv4 checksums in-bound. Only 3090 * on i82543 and later. 3091 */ 3092 if (sc->sc_type >= WM_T_82543) { 3093 ifp->if_capabilities |= 3094 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 3095 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 3096 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 3097 IFCAP_CSUM_TCPv6_Tx | 3098 IFCAP_CSUM_UDPv6_Tx; 3099 } 3100 3101 /* 3102 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL. 3103 * 3104 * 82541GI (8086:1076) ... no 3105 * 82572EI (8086:10b9) ... yes 3106 */ 3107 if (sc->sc_type >= WM_T_82571) { 3108 ifp->if_capabilities |= 3109 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx; 3110 } 3111 3112 /* 3113 * If we're a i82544 or greater (except i82547), we can do 3114 * TCP segmentation offload. 3115 */ 3116 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) { 3117 ifp->if_capabilities |= IFCAP_TSOv4; 3118 } 3119 3120 if (sc->sc_type >= WM_T_82571) { 3121 ifp->if_capabilities |= IFCAP_TSOv6; 3122 } 3123 3124 sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT; 3125 sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT; 3126 sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT; 3127 sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT; 3128 3129 /* Attach the interface. */ 3130 if_initialize(ifp); 3131 sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if); 3132 ether_ifattach(ifp, enaddr); 3133 ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb); 3134 if_register(ifp); 3135 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 3136 RND_FLAG_DEFAULT); 3137 3138 #ifdef WM_EVENT_COUNTERS 3139 /* Attach event counters. */ 3140 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR, 3141 NULL, xname, "linkintr"); 3142 3143 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC, 3144 NULL, xname, "tx_xoff"); 3145 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC, 3146 NULL, xname, "tx_xon"); 3147 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC, 3148 NULL, xname, "rx_xoff"); 3149 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC, 3150 NULL, xname, "rx_xon"); 3151 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC, 3152 NULL, xname, "rx_macctl"); 3153 #endif /* WM_EVENT_COUNTERS */ 3154 3155 sc->sc_txrx_use_workqueue = false; 3156 3157 if (wm_phy_need_linkdown_discard(sc)) { 3158 DPRINTF(sc, WM_DEBUG_LINK, 3159 ("%s: %s: Set linkdown discard flag\n", 3160 device_xname(sc->sc_dev), __func__)); 3161 wm_set_linkdown_discard(sc); 3162 } 3163 3164 wm_init_sysctls(sc); 3165 3166 if (pmf_device_register(self, wm_suspend, wm_resume)) 3167 pmf_class_network_register(self, ifp); 3168 else 3169 aprint_error_dev(self, "couldn't establish power handler\n"); 3170 3171 sc->sc_flags |= WM_F_ATTACHED; 3172 out: 3173 return; 3174 } 3175 3176 /* The detach function (ca_detach) */ 3177 static int 3178 wm_detach(device_t self, int flags __unused) 3179 { 3180 struct wm_softc *sc = device_private(self); 3181 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3182 int i; 3183 3184 if ((sc->sc_flags & WM_F_ATTACHED) == 0) 3185 return 0; 3186 3187 /* Stop the interface. Callouts are stopped in it. 
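	 * wm_stop() also makes sure the Tx/Rx workqueue is idle, so the
	 * workqueue_destroy() further below has nothing left in flight.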
*/ 3188 wm_stop(ifp, 1); 3189 3190 pmf_device_deregister(self); 3191 3192 sysctl_teardown(&sc->sc_sysctllog); 3193 3194 #ifdef WM_EVENT_COUNTERS 3195 evcnt_detach(&sc->sc_ev_linkintr); 3196 3197 evcnt_detach(&sc->sc_ev_tx_xoff); 3198 evcnt_detach(&sc->sc_ev_tx_xon); 3199 evcnt_detach(&sc->sc_ev_rx_xoff); 3200 evcnt_detach(&sc->sc_ev_rx_xon); 3201 evcnt_detach(&sc->sc_ev_rx_macctl); 3202 #endif /* WM_EVENT_COUNTERS */ 3203 3204 rnd_detach_source(&sc->rnd_source); 3205 3206 /* Tell the firmware about the release */ 3207 WM_CORE_LOCK(sc); 3208 wm_release_manageability(sc); 3209 wm_release_hw_control(sc); 3210 wm_enable_wakeup(sc); 3211 WM_CORE_UNLOCK(sc); 3212 3213 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 3214 3215 ether_ifdetach(ifp); 3216 if_detach(ifp); 3217 if_percpuq_destroy(sc->sc_ipq); 3218 3219 /* Delete all remaining media. */ 3220 ifmedia_fini(&sc->sc_mii.mii_media); 3221 3222 /* Unload RX dmamaps and free mbufs */ 3223 for (i = 0; i < sc->sc_nqueues; i++) { 3224 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 3225 mutex_enter(rxq->rxq_lock); 3226 wm_rxdrain(rxq); 3227 mutex_exit(rxq->rxq_lock); 3228 } 3229 /* Must unlock here */ 3230 3231 /* Disestablish the interrupt handler */ 3232 for (i = 0; i < sc->sc_nintrs; i++) { 3233 if (sc->sc_ihs[i] != NULL) { 3234 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]); 3235 sc->sc_ihs[i] = NULL; 3236 } 3237 } 3238 pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs); 3239 3240 /* wm_stop() ensure workqueue is stopped. */ 3241 workqueue_destroy(sc->sc_queue_wq); 3242 3243 for (i = 0; i < sc->sc_nqueues; i++) 3244 softint_disestablish(sc->sc_queue[i].wmq_si); 3245 3246 wm_free_txrx_queues(sc); 3247 3248 /* Unmap the registers */ 3249 if (sc->sc_ss) { 3250 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss); 3251 sc->sc_ss = 0; 3252 } 3253 if (sc->sc_ios) { 3254 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 3255 sc->sc_ios = 0; 3256 } 3257 if (sc->sc_flashs) { 3258 bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs); 3259 sc->sc_flashs = 0; 3260 } 3261 3262 if (sc->sc_core_lock) 3263 mutex_obj_free(sc->sc_core_lock); 3264 if (sc->sc_ich_phymtx) 3265 mutex_obj_free(sc->sc_ich_phymtx); 3266 if (sc->sc_ich_nvmmtx) 3267 mutex_obj_free(sc->sc_ich_nvmmtx); 3268 3269 return 0; 3270 } 3271 3272 static bool 3273 wm_suspend(device_t self, const pmf_qual_t *qual) 3274 { 3275 struct wm_softc *sc = device_private(self); 3276 3277 wm_release_manageability(sc); 3278 wm_release_hw_control(sc); 3279 wm_enable_wakeup(sc); 3280 3281 return true; 3282 } 3283 3284 static bool 3285 wm_resume(device_t self, const pmf_qual_t *qual) 3286 { 3287 struct wm_softc *sc = device_private(self); 3288 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3289 pcireg_t reg; 3290 char buf[256]; 3291 3292 reg = CSR_READ(sc, WMREG_WUS); 3293 if (reg != 0) { 3294 snprintb(buf, sizeof(buf), WUS_FLAGS, reg); 3295 device_printf(sc->sc_dev, "wakeup status %s\n", buf); 3296 CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */ 3297 } 3298 3299 if (sc->sc_type >= WM_T_PCH2) 3300 wm_resume_workarounds_pchlan(sc); 3301 if ((ifp->if_flags & IFF_UP) == 0) { 3302 /* >= PCH_SPT hardware workaround before reset. 
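		 * Presumably descriptors left in the rings across a reset can
		 * wedge these chips, so wm_flush_desc_rings() drains them
		 * first.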
*/ 3303 if (sc->sc_type >= WM_T_PCH_SPT) 3304 wm_flush_desc_rings(sc); 3305 3306 wm_reset(sc); 3307 /* Non-AMT based hardware can now take control from firmware */ 3308 if ((sc->sc_flags & WM_F_HAS_AMT) == 0) 3309 wm_get_hw_control(sc); 3310 wm_init_manageability(sc); 3311 } else { 3312 /* 3313 * We called pmf_class_network_register(), so if_init() is 3314 * automatically called when IFF_UP. wm_reset(), 3315 * wm_get_hw_control() and wm_init_manageability() are called 3316 * via wm_init(). 3317 */ 3318 } 3319 3320 return true; 3321 } 3322 3323 /* 3324 * wm_watchdog: [ifnet interface function] 3325 * 3326 * Watchdog timer handler. 3327 */ 3328 static void 3329 wm_watchdog(struct ifnet *ifp) 3330 { 3331 int qid; 3332 struct wm_softc *sc = ifp->if_softc; 3333 uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */ 3334 3335 for (qid = 0; qid < sc->sc_nqueues; qid++) { 3336 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq; 3337 3338 wm_watchdog_txq(ifp, txq, &hang_queue); 3339 } 3340 3341 /* IF any of queues hanged up, reset the interface. */ 3342 if (hang_queue != 0) { 3343 (void)wm_init(ifp); 3344 3345 /* 3346 * There are still some upper layer processing which call 3347 * ifp->if_start(). e.g. ALTQ or one CPU system 3348 */ 3349 /* Try to get more packets going. */ 3350 ifp->if_start(ifp); 3351 } 3352 } 3353 3354 3355 static void 3356 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang) 3357 { 3358 3359 mutex_enter(txq->txq_lock); 3360 if (txq->txq_sending && 3361 time_uptime - txq->txq_lastsent > wm_watchdog_timeout) 3362 wm_watchdog_txq_locked(ifp, txq, hang); 3363 3364 mutex_exit(txq->txq_lock); 3365 } 3366 3367 static void 3368 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq, 3369 uint16_t *hang) 3370 { 3371 struct wm_softc *sc = ifp->if_softc; 3372 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq); 3373 3374 KASSERT(mutex_owned(txq->txq_lock)); 3375 3376 /* 3377 * Since we're using delayed interrupts, sweep up 3378 * before we report an error. 
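	 * wm_txeof() reclaims any descriptors the chip has completed in
	 * the meantime; only if txq_sending is still set afterwards is
	 * this queue reported as hung via *hang.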
3379 */ 3380 wm_txeof(txq, UINT_MAX); 3381 3382 if (txq->txq_sending) 3383 *hang |= __BIT(wmq->wmq_id); 3384 3385 if (txq->txq_free == WM_NTXDESC(txq)) { 3386 log(LOG_ERR, "%s: device timeout (lost interrupt)\n", 3387 device_xname(sc->sc_dev)); 3388 } else { 3389 #ifdef WM_DEBUG 3390 int i, j; 3391 struct wm_txsoft *txs; 3392 #endif 3393 log(LOG_ERR, 3394 "%s: device timeout (txfree %d txsfree %d txnext %d)\n", 3395 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree, 3396 txq->txq_next); 3397 if_statinc(ifp, if_oerrors); 3398 #ifdef WM_DEBUG 3399 for (i = txq->txq_sdirty; i != txq->txq_snext; 3400 i = WM_NEXTTXS(txq, i)) { 3401 txs = &txq->txq_soft[i]; 3402 printf("txs %d tx %d -> %d\n", 3403 i, txs->txs_firstdesc, txs->txs_lastdesc); 3404 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) { 3405 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 3406 printf("\tdesc %d: 0x%" PRIx64 "\n", j, 3407 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr); 3408 printf("\t %#08x%08x\n", 3409 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields, 3410 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen); 3411 } else { 3412 printf("\tdesc %d: 0x%" PRIx64 "\n", j, 3413 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 | 3414 txq->txq_descs[j].wtx_addr.wa_low); 3415 printf("\t %#04x%02x%02x%08x\n", 3416 txq->txq_descs[j].wtx_fields.wtxu_vlan, 3417 txq->txq_descs[j].wtx_fields.wtxu_options, 3418 txq->txq_descs[j].wtx_fields.wtxu_status, 3419 txq->txq_descs[j].wtx_cmdlen); 3420 } 3421 if (j == txs->txs_lastdesc) 3422 break; 3423 } 3424 } 3425 #endif 3426 } 3427 } 3428 3429 /* 3430 * wm_tick: 3431 * 3432 * One second timer, used to check link status, sweep up 3433 * completed transmit jobs, etc. 3434 */ 3435 static void 3436 wm_tick(void *arg) 3437 { 3438 struct wm_softc *sc = arg; 3439 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3440 #ifndef WM_MPSAFE 3441 int s = splnet(); 3442 #endif 3443 3444 WM_CORE_LOCK(sc); 3445 3446 if (sc->sc_core_stopping) { 3447 WM_CORE_UNLOCK(sc); 3448 #ifndef WM_MPSAFE 3449 splx(s); 3450 #endif 3451 return; 3452 } 3453 3454 if (sc->sc_type >= WM_T_82542_2_1) { 3455 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC)); 3456 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC)); 3457 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC)); 3458 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC)); 3459 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC)); 3460 } 3461 3462 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 3463 if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC)); 3464 if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */ 3465 + CSR_READ(sc, WMREG_CRCERRS) 3466 + CSR_READ(sc, WMREG_ALGNERRC) 3467 + CSR_READ(sc, WMREG_SYMERRC) 3468 + CSR_READ(sc, WMREG_RXERRC) 3469 + CSR_READ(sc, WMREG_SEC) 3470 + CSR_READ(sc, WMREG_CEXTERR) 3471 + CSR_READ(sc, WMREG_RLEC)); 3472 /* 3473 * WMREG_RNBC is incremented when there is no available buffers in host 3474 * memory. It does not mean the number of dropped packet. Because 3475 * ethernet controller can receive packets in such case if there is 3476 * space in phy's FIFO. 3477 * 3478 * If you want to know the nubmer of WMREG_RMBC, you should use such as 3479 * own EVCNT instead of if_iqdrops. 
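	 * What is added to if_iqdrops below is WMREG_MPC (Missed Packets
	 * Count), i.e. packets dropped because the receive FIFO was full,
	 * typically because host receive buffers were not replenished
	 * fast enough.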
3480 */ 3481 if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC)); 3482 IF_STAT_PUTREF(ifp); 3483 3484 if (sc->sc_flags & WM_F_HAS_MII) 3485 mii_tick(&sc->sc_mii); 3486 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211) 3487 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) 3488 wm_serdes_tick(sc); 3489 else 3490 wm_tbi_tick(sc); 3491 3492 WM_CORE_UNLOCK(sc); 3493 3494 wm_watchdog(ifp); 3495 3496 callout_schedule(&sc->sc_tick_ch, hz); 3497 } 3498 3499 static int 3500 wm_ifflags_cb(struct ethercom *ec) 3501 { 3502 struct ifnet *ifp = &ec->ec_if; 3503 struct wm_softc *sc = ifp->if_softc; 3504 u_short iffchange; 3505 int ecchange; 3506 bool needreset = false; 3507 int rc = 0; 3508 3509 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 3510 device_xname(sc->sc_dev), __func__)); 3511 3512 WM_CORE_LOCK(sc); 3513 3514 /* 3515 * Check for if_flags. 3516 * Main usage is to prevent linkdown when opening bpf. 3517 */ 3518 iffchange = ifp->if_flags ^ sc->sc_if_flags; 3519 sc->sc_if_flags = ifp->if_flags; 3520 if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 3521 needreset = true; 3522 goto ec; 3523 } 3524 3525 /* iff related updates */ 3526 if ((iffchange & IFF_PROMISC) != 0) 3527 wm_set_filter(sc); 3528 3529 wm_set_vlan(sc); 3530 3531 ec: 3532 /* Check for ec_capenable. */ 3533 ecchange = ec->ec_capenable ^ sc->sc_ec_capenable; 3534 sc->sc_ec_capenable = ec->ec_capenable; 3535 if ((ecchange & ~ETHERCAP_EEE) != 0) { 3536 needreset = true; 3537 goto out; 3538 } 3539 3540 /* ec related updates */ 3541 wm_set_eee(sc); 3542 3543 out: 3544 if (needreset) 3545 rc = ENETRESET; 3546 WM_CORE_UNLOCK(sc); 3547 3548 return rc; 3549 } 3550 3551 static bool 3552 wm_phy_need_linkdown_discard(struct wm_softc *sc) 3553 { 3554 3555 switch (sc->sc_phytype) { 3556 case WMPHY_82577: /* ihphy */ 3557 case WMPHY_82578: /* atphy */ 3558 case WMPHY_82579: /* ihphy */ 3559 case WMPHY_I217: /* ihphy */ 3560 case WMPHY_82580: /* ihphy */ 3561 case WMPHY_I350: /* ihphy */ 3562 return true; 3563 default: 3564 return false; 3565 } 3566 } 3567 3568 static void 3569 wm_set_linkdown_discard(struct wm_softc *sc) 3570 { 3571 3572 for (int i = 0; i < sc->sc_nqueues; i++) { 3573 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 3574 3575 mutex_enter(txq->txq_lock); 3576 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD; 3577 mutex_exit(txq->txq_lock); 3578 } 3579 } 3580 3581 static void 3582 wm_clear_linkdown_discard(struct wm_softc *sc) 3583 { 3584 3585 for (int i = 0; i < sc->sc_nqueues; i++) { 3586 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 3587 3588 mutex_enter(txq->txq_lock); 3589 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD; 3590 mutex_exit(txq->txq_lock); 3591 } 3592 } 3593 3594 /* 3595 * wm_ioctl: [ifnet interface function] 3596 * 3597 * Handle control requests from the operator. 3598 */ 3599 static int 3600 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data) 3601 { 3602 struct wm_softc *sc = ifp->if_softc; 3603 struct ifreq *ifr = (struct ifreq *)data; 3604 struct ifaddr *ifa = (struct ifaddr *)data; 3605 struct sockaddr_dl *sdl; 3606 int s, error; 3607 3608 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 3609 device_xname(sc->sc_dev), __func__)); 3610 3611 #ifndef WM_MPSAFE 3612 s = splnet(); 3613 #endif 3614 switch (cmd) { 3615 case SIOCSIFMEDIA: 3616 WM_CORE_LOCK(sc); 3617 /* Flow control requires full-duplex mode. 
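		 * (PAUSE frames are only defined for full-duplex links, and
		 * for autonegotiated media the effective flow control setting
		 * is taken from the negotiation result rather than from the
		 * requested media word.)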
*/ 3618 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 3619 (ifr->ifr_media & IFM_FDX) == 0) 3620 ifr->ifr_media &= ~IFM_ETH_FMASK; 3621 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 3622 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 3623 /* We can do both TXPAUSE and RXPAUSE. */ 3624 ifr->ifr_media |= 3625 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3626 } 3627 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 3628 } 3629 WM_CORE_UNLOCK(sc); 3630 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 3631 if (error == 0 && wm_phy_need_linkdown_discard(sc)) { 3632 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) { 3633 DPRINTF(sc, WM_DEBUG_LINK, 3634 ("%s: %s: Set linkdown discard flag\n", 3635 device_xname(sc->sc_dev), __func__)); 3636 wm_set_linkdown_discard(sc); 3637 } 3638 } 3639 break; 3640 case SIOCINITIFADDR: 3641 WM_CORE_LOCK(sc); 3642 if (ifa->ifa_addr->sa_family == AF_LINK) { 3643 sdl = satosdl(ifp->if_dl->ifa_addr); 3644 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len, 3645 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen); 3646 /* Unicast address is the first multicast entry */ 3647 wm_set_filter(sc); 3648 error = 0; 3649 WM_CORE_UNLOCK(sc); 3650 break; 3651 } 3652 WM_CORE_UNLOCK(sc); 3653 /*FALLTHROUGH*/ 3654 default: 3655 if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) { 3656 if (((ifp->if_flags & IFF_UP) != 0) && 3657 ((ifr->ifr_flags & IFF_UP) == 0)) { 3658 DPRINTF(sc, WM_DEBUG_LINK, 3659 ("%s: %s: Set linkdown discard flag\n", 3660 device_xname(sc->sc_dev), __func__)); 3661 wm_set_linkdown_discard(sc); 3662 } 3663 } 3664 #ifdef WM_MPSAFE 3665 s = splnet(); 3666 #endif 3667 /* It may call wm_start, so unlock here */ 3668 error = ether_ioctl(ifp, cmd, data); 3669 #ifdef WM_MPSAFE 3670 splx(s); 3671 #endif 3672 if (error != ENETRESET) 3673 break; 3674 3675 error = 0; 3676 3677 if (cmd == SIOCSIFCAP) 3678 error = if_init(ifp); 3679 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 3680 ; 3681 else if (ifp->if_flags & IFF_RUNNING) { 3682 /* 3683 * Multicast list has changed; set the hardware filter 3684 * accordingly. 3685 */ 3686 WM_CORE_LOCK(sc); 3687 wm_set_filter(sc); 3688 WM_CORE_UNLOCK(sc); 3689 } 3690 break; 3691 } 3692 3693 #ifndef WM_MPSAFE 3694 splx(s); 3695 #endif 3696 return error; 3697 } 3698 3699 /* MAC address related */ 3700 3701 /* 3702 * Get the offset of MAC address and return it. 3703 * If error occured, use offset 0. 3704 */ 3705 static uint16_t 3706 wm_check_alt_mac_addr(struct wm_softc *sc) 3707 { 3708 uint16_t myea[ETHER_ADDR_LEN / 2]; 3709 uint16_t offset = NVM_OFF_MACADDR; 3710 3711 /* Try to read alternative MAC address pointer */ 3712 if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0) 3713 return 0; 3714 3715 /* Check pointer if it's valid or not. */ 3716 if ((offset == 0x0000) || (offset == 0xffff)) 3717 return 0; 3718 3719 offset += NVM_OFF_MACADDR_82571(sc->sc_funcid); 3720 /* 3721 * Check whether alternative MAC address is valid or not. 3722 * Some cards have non 0xffff pointer but those don't use 3723 * alternative MAC address in reality. 3724 * 3725 * Check whether the broadcast bit is set or not. 
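	 * (Bit 0 of the first octet is the I/G bit, set on multicast and
	 * broadcast addresses; a valid individual address must have it
	 * clear, which is what the test below checks.)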
3726 */ 3727 if (wm_nvm_read(sc, offset, 1, myea) == 0) 3728 if (((myea[0] & 0xff) & 0x01) == 0) 3729 return offset; /* Found */ 3730 3731 /* Not found */ 3732 return 0; 3733 } 3734 3735 static int 3736 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr) 3737 { 3738 uint16_t myea[ETHER_ADDR_LEN / 2]; 3739 uint16_t offset = NVM_OFF_MACADDR; 3740 int do_invert = 0; 3741 3742 switch (sc->sc_type) { 3743 case WM_T_82580: 3744 case WM_T_I350: 3745 case WM_T_I354: 3746 /* EEPROM Top Level Partitioning */ 3747 offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0; 3748 break; 3749 case WM_T_82571: 3750 case WM_T_82575: 3751 case WM_T_82576: 3752 case WM_T_80003: 3753 case WM_T_I210: 3754 case WM_T_I211: 3755 offset = wm_check_alt_mac_addr(sc); 3756 if (offset == 0) 3757 if ((sc->sc_funcid & 0x01) == 1) 3758 do_invert = 1; 3759 break; 3760 default: 3761 if ((sc->sc_funcid & 0x01) == 1) 3762 do_invert = 1; 3763 break; 3764 } 3765 3766 if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0) 3767 goto bad; 3768 3769 enaddr[0] = myea[0] & 0xff; 3770 enaddr[1] = myea[0] >> 8; 3771 enaddr[2] = myea[1] & 0xff; 3772 enaddr[3] = myea[1] >> 8; 3773 enaddr[4] = myea[2] & 0xff; 3774 enaddr[5] = myea[2] >> 8; 3775 3776 /* 3777 * Toggle the LSB of the MAC address on the second port 3778 * of some dual port cards. 3779 */ 3780 if (do_invert != 0) 3781 enaddr[5] ^= 1; 3782 3783 return 0; 3784 3785 bad: 3786 return -1; 3787 } 3788 3789 /* 3790 * wm_set_ral: 3791 * 3792 * Set an entery in the receive address list. 3793 */ 3794 static void 3795 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx) 3796 { 3797 uint32_t ral_lo, ral_hi, addrl, addrh; 3798 uint32_t wlock_mac; 3799 int rv; 3800 3801 if (enaddr != NULL) { 3802 ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) | 3803 ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24); 3804 ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8); 3805 ral_hi |= RAL_AV; 3806 } else { 3807 ral_lo = 0; 3808 ral_hi = 0; 3809 } 3810 3811 switch (sc->sc_type) { 3812 case WM_T_82542_2_0: 3813 case WM_T_82542_2_1: 3814 case WM_T_82543: 3815 CSR_WRITE(sc, WMREG_RAL(idx), ral_lo); 3816 CSR_WRITE_FLUSH(sc); 3817 CSR_WRITE(sc, WMREG_RAH(idx), ral_hi); 3818 CSR_WRITE_FLUSH(sc); 3819 break; 3820 case WM_T_PCH2: 3821 case WM_T_PCH_LPT: 3822 case WM_T_PCH_SPT: 3823 case WM_T_PCH_CNP: 3824 if (idx == 0) { 3825 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo); 3826 CSR_WRITE_FLUSH(sc); 3827 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi); 3828 CSR_WRITE_FLUSH(sc); 3829 return; 3830 } 3831 if (sc->sc_type != WM_T_PCH2) { 3832 wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), 3833 FWSM_WLOCK_MAC); 3834 addrl = WMREG_SHRAL(idx - 1); 3835 addrh = WMREG_SHRAH(idx - 1); 3836 } else { 3837 wlock_mac = 0; 3838 addrl = WMREG_PCH_LPT_SHRAL(idx - 1); 3839 addrh = WMREG_PCH_LPT_SHRAH(idx - 1); 3840 } 3841 3842 if ((wlock_mac == 0) || (idx <= wlock_mac)) { 3843 rv = wm_get_swflag_ich8lan(sc); 3844 if (rv != 0) 3845 return; 3846 CSR_WRITE(sc, addrl, ral_lo); 3847 CSR_WRITE_FLUSH(sc); 3848 CSR_WRITE(sc, addrh, ral_hi); 3849 CSR_WRITE_FLUSH(sc); 3850 wm_put_swflag_ich8lan(sc); 3851 } 3852 3853 break; 3854 default: 3855 CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo); 3856 CSR_WRITE_FLUSH(sc); 3857 CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi); 3858 CSR_WRITE_FLUSH(sc); 3859 break; 3860 } 3861 } 3862 3863 /* 3864 * wm_mchash: 3865 * 3866 * Compute the hash of the multicast address for the 4096-bit 3867 * multicast filter. 
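 * Illustrative example (non-ICH case, sc_mchash_type == 0): for the
 * all-hosts multicast address 01:00:5e:00:00:01, enaddr[4] is 0x00 and
 * enaddr[5] is 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010;
 * wm_set_filter() then sets bit 16 (hash & 0x1f) of MTA register 0
 * (hash >> 5).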
3868 */ 3869 static uint32_t 3870 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr) 3871 { 3872 static const int lo_shift[4] = { 4, 3, 2, 0 }; 3873 static const int hi_shift[4] = { 4, 5, 6, 8 }; 3874 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 }; 3875 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 }; 3876 uint32_t hash; 3877 3878 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 3879 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 3880 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 3881 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){ 3882 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) | 3883 (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]); 3884 return (hash & 0x3ff); 3885 } 3886 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 3887 (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]); 3888 3889 return (hash & 0xfff); 3890 } 3891 3892 /* 3893 * 3894 * 3895 */ 3896 static int 3897 wm_rar_count(struct wm_softc *sc) 3898 { 3899 int size; 3900 3901 switch (sc->sc_type) { 3902 case WM_T_ICH8: 3903 size = WM_RAL_TABSIZE_ICH8 -1; 3904 break; 3905 case WM_T_ICH9: 3906 case WM_T_ICH10: 3907 case WM_T_PCH: 3908 size = WM_RAL_TABSIZE_ICH8; 3909 break; 3910 case WM_T_PCH2: 3911 size = WM_RAL_TABSIZE_PCH2; 3912 break; 3913 case WM_T_PCH_LPT: 3914 case WM_T_PCH_SPT: 3915 case WM_T_PCH_CNP: 3916 size = WM_RAL_TABSIZE_PCH_LPT; 3917 break; 3918 case WM_T_82575: 3919 case WM_T_I210: 3920 case WM_T_I211: 3921 size = WM_RAL_TABSIZE_82575; 3922 break; 3923 case WM_T_82576: 3924 case WM_T_82580: 3925 size = WM_RAL_TABSIZE_82576; 3926 break; 3927 case WM_T_I350: 3928 case WM_T_I354: 3929 size = WM_RAL_TABSIZE_I350; 3930 break; 3931 default: 3932 size = WM_RAL_TABSIZE; 3933 } 3934 3935 return size; 3936 } 3937 3938 /* 3939 * wm_set_filter: 3940 * 3941 * Set up the receive filter. 3942 */ 3943 static void 3944 wm_set_filter(struct wm_softc *sc) 3945 { 3946 struct ethercom *ec = &sc->sc_ethercom; 3947 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3948 struct ether_multi *enm; 3949 struct ether_multistep step; 3950 bus_addr_t mta_reg; 3951 uint32_t hash, reg, bit; 3952 int i, size, ralmax, rv; 3953 3954 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 3955 device_xname(sc->sc_dev), __func__)); 3956 3957 if (sc->sc_type >= WM_T_82544) 3958 mta_reg = WMREG_CORDOVA_MTA; 3959 else 3960 mta_reg = WMREG_MTA; 3961 3962 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 3963 3964 if (ifp->if_flags & IFF_BROADCAST) 3965 sc->sc_rctl |= RCTL_BAM; 3966 if (ifp->if_flags & IFF_PROMISC) { 3967 sc->sc_rctl |= RCTL_UPE; 3968 ETHER_LOCK(ec); 3969 ec->ec_flags |= ETHER_F_ALLMULTI; 3970 ETHER_UNLOCK(ec); 3971 goto allmulti; 3972 } 3973 3974 /* 3975 * Set the station address in the first RAL slot, and 3976 * clear the remaining slots. 
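	 * The loop below only clears entries below ralmax; on PCH_LPT and
	 * newer parts the FWSM WLOCK_MAC field marks the remaining SHRA
	 * entries as owned by the management firmware, so they are left
	 * untouched.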
3977 */ 3978 size = wm_rar_count(sc); 3979 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0); 3980 3981 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) 3982 || (sc->sc_type == WM_T_PCH_CNP)) { 3983 i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC); 3984 switch (i) { 3985 case 0: 3986 /* We can use all entries */ 3987 ralmax = size; 3988 break; 3989 case 1: 3990 /* Only RAR[0] */ 3991 ralmax = 1; 3992 break; 3993 default: 3994 /* Available SHRA + RAR[0] */ 3995 ralmax = i + 1; 3996 } 3997 } else 3998 ralmax = size; 3999 for (i = 1; i < size; i++) { 4000 if (i < ralmax) 4001 wm_set_ral(sc, NULL, i); 4002 } 4003 4004 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4005 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 4006 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 4007 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) 4008 size = WM_ICH8_MC_TABSIZE; 4009 else 4010 size = WM_MC_TABSIZE; 4011 /* Clear out the multicast table. */ 4012 for (i = 0; i < size; i++) { 4013 CSR_WRITE(sc, mta_reg + (i << 2), 0); 4014 CSR_WRITE_FLUSH(sc); 4015 } 4016 4017 ETHER_LOCK(ec); 4018 ETHER_FIRST_MULTI(step, ec, enm); 4019 while (enm != NULL) { 4020 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 4021 ec->ec_flags |= ETHER_F_ALLMULTI; 4022 ETHER_UNLOCK(ec); 4023 /* 4024 * We must listen to a range of multicast addresses. 4025 * For now, just accept all multicasts, rather than 4026 * trying to set only those filter bits needed to match 4027 * the range. (At this time, the only use of address 4028 * ranges is for IP multicast routing, for which the 4029 * range is big enough to require all bits set.) 4030 */ 4031 goto allmulti; 4032 } 4033 4034 hash = wm_mchash(sc, enm->enm_addrlo); 4035 4036 reg = (hash >> 5); 4037 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 4038 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 4039 || (sc->sc_type == WM_T_PCH2) 4040 || (sc->sc_type == WM_T_PCH_LPT) 4041 || (sc->sc_type == WM_T_PCH_SPT) 4042 || (sc->sc_type == WM_T_PCH_CNP)) 4043 reg &= 0x1f; 4044 else 4045 reg &= 0x7f; 4046 bit = hash & 0x1f; 4047 4048 hash = CSR_READ(sc, mta_reg + (reg << 2)); 4049 hash |= 1U << bit; 4050 4051 if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) { 4052 /* 4053 * 82544 Errata 9: Certain register cannot be written 4054 * with particular alignments in PCI-X bus operation 4055 * (FCAH, MTA and VFTA). 
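			 * The workaround below reads and saves the preceding
			 * even-numbered MTA register, writes the intended
			 * (odd-numbered) one, and then rewrites the saved
			 * value, so the last write always goes to an
			 * even-numbered (aligned) register.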
4056 */ 4057 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2)); 4058 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 4059 CSR_WRITE_FLUSH(sc); 4060 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit); 4061 CSR_WRITE_FLUSH(sc); 4062 } else { 4063 CSR_WRITE(sc, mta_reg + (reg << 2), hash); 4064 CSR_WRITE_FLUSH(sc); 4065 } 4066 4067 ETHER_NEXT_MULTI(step, enm); 4068 } 4069 ec->ec_flags &= ~ETHER_F_ALLMULTI; 4070 ETHER_UNLOCK(ec); 4071 4072 goto setit; 4073 4074 allmulti: 4075 sc->sc_rctl |= RCTL_MPE; 4076 4077 setit: 4078 if (sc->sc_type >= WM_T_PCH2) { 4079 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) 4080 && (ifp->if_mtu > ETHERMTU)) 4081 rv = wm_lv_jumbo_workaround_ich8lan(sc, true); 4082 else 4083 rv = wm_lv_jumbo_workaround_ich8lan(sc, false); 4084 if (rv != 0) 4085 device_printf(sc->sc_dev, 4086 "Failed to do workaround for jumbo frame.\n"); 4087 } 4088 4089 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl); 4090 } 4091 4092 /* Reset and init related */ 4093 4094 static void 4095 wm_set_vlan(struct wm_softc *sc) 4096 { 4097 4098 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4099 device_xname(sc->sc_dev), __func__)); 4100 4101 /* Deal with VLAN enables. */ 4102 if (VLAN_ATTACHED(&sc->sc_ethercom)) 4103 sc->sc_ctrl |= CTRL_VME; 4104 else 4105 sc->sc_ctrl &= ~CTRL_VME; 4106 4107 /* Write the control registers. */ 4108 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4109 } 4110 4111 static void 4112 wm_set_pcie_completion_timeout(struct wm_softc *sc) 4113 { 4114 uint32_t gcr; 4115 pcireg_t ctrl2; 4116 4117 gcr = CSR_READ(sc, WMREG_GCR); 4118 4119 /* Only take action if timeout value is defaulted to 0 */ 4120 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0) 4121 goto out; 4122 4123 if ((gcr & GCR_CAP_VER2) == 0) { 4124 gcr |= GCR_CMPL_TMOUT_10MS; 4125 goto out; 4126 } 4127 4128 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 4129 sc->sc_pcixe_capoff + PCIE_DCSR2); 4130 ctrl2 |= WM_PCIE_DCSR2_16MS; 4131 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 4132 sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2); 4133 4134 out: 4135 /* Disable completion timeout resend */ 4136 gcr &= ~GCR_CMPL_TMOUT_RESEND; 4137 4138 CSR_WRITE(sc, WMREG_GCR, gcr); 4139 } 4140 4141 void 4142 wm_get_auto_rd_done(struct wm_softc *sc) 4143 { 4144 int i; 4145 4146 /* wait for eeprom to reload */ 4147 switch (sc->sc_type) { 4148 case WM_T_82571: 4149 case WM_T_82572: 4150 case WM_T_82573: 4151 case WM_T_82574: 4152 case WM_T_82583: 4153 case WM_T_82575: 4154 case WM_T_82576: 4155 case WM_T_82580: 4156 case WM_T_I350: 4157 case WM_T_I354: 4158 case WM_T_I210: 4159 case WM_T_I211: 4160 case WM_T_80003: 4161 case WM_T_ICH8: 4162 case WM_T_ICH9: 4163 for (i = 0; i < 10; i++) { 4164 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD) 4165 break; 4166 delay(1000); 4167 } 4168 if (i == 10) { 4169 log(LOG_ERR, "%s: auto read from eeprom failed to " 4170 "complete\n", device_xname(sc->sc_dev)); 4171 } 4172 break; 4173 default: 4174 break; 4175 } 4176 } 4177 4178 void 4179 wm_lan_init_done(struct wm_softc *sc) 4180 { 4181 uint32_t reg = 0; 4182 int i; 4183 4184 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4185 device_xname(sc->sc_dev), __func__)); 4186 4187 /* Wait for eeprom to reload */ 4188 switch (sc->sc_type) { 4189 case WM_T_ICH10: 4190 case WM_T_PCH: 4191 case WM_T_PCH2: 4192 case WM_T_PCH_LPT: 4193 case WM_T_PCH_SPT: 4194 case WM_T_PCH_CNP: 4195 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) { 4196 reg = CSR_READ(sc, WMREG_STATUS); 4197 if ((reg & STATUS_LAN_INIT_DONE) != 0) 4198 break; 4199 delay(100); 4200 } 4201 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) { 4202 log(LOG_ERR, "%s: 
%s: lan_init_done failed to " 4203 "complete\n", device_xname(sc->sc_dev), __func__); 4204 } 4205 break; 4206 default: 4207 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 4208 __func__); 4209 break; 4210 } 4211 4212 reg &= ~STATUS_LAN_INIT_DONE; 4213 CSR_WRITE(sc, WMREG_STATUS, reg); 4214 } 4215 4216 void 4217 wm_get_cfg_done(struct wm_softc *sc) 4218 { 4219 int mask; 4220 uint32_t reg; 4221 int i; 4222 4223 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4224 device_xname(sc->sc_dev), __func__)); 4225 4226 /* Wait for eeprom to reload */ 4227 switch (sc->sc_type) { 4228 case WM_T_82542_2_0: 4229 case WM_T_82542_2_1: 4230 /* null */ 4231 break; 4232 case WM_T_82543: 4233 case WM_T_82544: 4234 case WM_T_82540: 4235 case WM_T_82545: 4236 case WM_T_82545_3: 4237 case WM_T_82546: 4238 case WM_T_82546_3: 4239 case WM_T_82541: 4240 case WM_T_82541_2: 4241 case WM_T_82547: 4242 case WM_T_82547_2: 4243 case WM_T_82573: 4244 case WM_T_82574: 4245 case WM_T_82583: 4246 /* generic */ 4247 delay(10*1000); 4248 break; 4249 case WM_T_80003: 4250 case WM_T_82571: 4251 case WM_T_82572: 4252 case WM_T_82575: 4253 case WM_T_82576: 4254 case WM_T_82580: 4255 case WM_T_I350: 4256 case WM_T_I354: 4257 case WM_T_I210: 4258 case WM_T_I211: 4259 if (sc->sc_type == WM_T_82571) { 4260 /* Only 82571 shares port 0 */ 4261 mask = EEMNGCTL_CFGDONE_0; 4262 } else 4263 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid; 4264 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) { 4265 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask) 4266 break; 4267 delay(1000); 4268 } 4269 if (i >= WM_PHY_CFG_TIMEOUT) 4270 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n", 4271 device_xname(sc->sc_dev), __func__)); 4272 break; 4273 case WM_T_ICH8: 4274 case WM_T_ICH9: 4275 case WM_T_ICH10: 4276 case WM_T_PCH: 4277 case WM_T_PCH2: 4278 case WM_T_PCH_LPT: 4279 case WM_T_PCH_SPT: 4280 case WM_T_PCH_CNP: 4281 delay(10*1000); 4282 if (sc->sc_type >= WM_T_ICH10) 4283 wm_lan_init_done(sc); 4284 else 4285 wm_get_auto_rd_done(sc); 4286 4287 /* Clear PHY Reset Asserted bit */ 4288 reg = CSR_READ(sc, WMREG_STATUS); 4289 if ((reg & STATUS_PHYRA) != 0) 4290 CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA); 4291 break; 4292 default: 4293 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 4294 __func__); 4295 break; 4296 } 4297 } 4298 4299 int 4300 wm_phy_post_reset(struct wm_softc *sc) 4301 { 4302 device_t dev = sc->sc_dev; 4303 uint16_t reg; 4304 int rv = 0; 4305 4306 /* This function is only for ICH8 and newer. 
*/ 4307 if (sc->sc_type < WM_T_ICH8) 4308 return 0; 4309 4310 if (wm_phy_resetisblocked(sc)) { 4311 /* XXX */ 4312 device_printf(dev, "PHY is blocked\n"); 4313 return -1; 4314 } 4315 4316 /* Allow time for h/w to get to quiescent state after reset */ 4317 delay(10*1000); 4318 4319 /* Perform any necessary post-reset workarounds */ 4320 if (sc->sc_type == WM_T_PCH) 4321 rv = wm_hv_phy_workarounds_ich8lan(sc); 4322 else if (sc->sc_type == WM_T_PCH2) 4323 rv = wm_lv_phy_workarounds_ich8lan(sc); 4324 if (rv != 0) 4325 return rv; 4326 4327 /* Clear the host wakeup bit after lcd reset */ 4328 if (sc->sc_type >= WM_T_PCH) { 4329 wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, ®); 4330 reg &= ~BM_WUC_HOST_WU_BIT; 4331 wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg); 4332 } 4333 4334 /* Configure the LCD with the extended configuration region in NVM */ 4335 if ((rv = wm_init_lcd_from_nvm(sc)) != 0) 4336 return rv; 4337 4338 /* Configure the LCD with the OEM bits in NVM */ 4339 rv = wm_oem_bits_config_ich8lan(sc, true); 4340 4341 if (sc->sc_type == WM_T_PCH2) { 4342 /* Ungate automatic PHY configuration on non-managed 82579 */ 4343 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { 4344 delay(10 * 1000); 4345 wm_gate_hw_phy_config_ich8lan(sc, false); 4346 } 4347 /* Set EEE LPI Update Timer to 200usec */ 4348 rv = sc->phy.acquire(sc); 4349 if (rv) 4350 return rv; 4351 rv = wm_write_emi_reg_locked(dev, 4352 I82579_LPI_UPDATE_TIMER, 0x1387); 4353 sc->phy.release(sc); 4354 } 4355 4356 return rv; 4357 } 4358 4359 /* Only for PCH and newer */ 4360 static int 4361 wm_write_smbus_addr(struct wm_softc *sc) 4362 { 4363 uint32_t strap, freq; 4364 uint16_t phy_data; 4365 int rv; 4366 4367 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4368 device_xname(sc->sc_dev), __func__)); 4369 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP); 4370 4371 strap = CSR_READ(sc, WMREG_STRAP); 4372 freq = __SHIFTOUT(strap, STRAP_FREQ); 4373 4374 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data); 4375 if (rv != 0) 4376 return -1; 4377 4378 phy_data &= ~HV_SMB_ADDR_ADDR; 4379 phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR); 4380 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 4381 4382 if (sc->sc_phytype == WMPHY_I217) { 4383 /* Restore SMBus frequency */ 4384 if (freq --) { 4385 phy_data &= ~(HV_SMB_ADDR_FREQ_LOW 4386 | HV_SMB_ADDR_FREQ_HIGH); 4387 phy_data |= __SHIFTIN((freq & 0x01) != 0, 4388 HV_SMB_ADDR_FREQ_LOW); 4389 phy_data |= __SHIFTIN((freq & 0x02) != 0, 4390 HV_SMB_ADDR_FREQ_HIGH); 4391 } else 4392 DPRINTF(sc, WM_DEBUG_INIT, 4393 ("%s: %s Unsupported SMB frequency in PHY\n", 4394 device_xname(sc->sc_dev), __func__)); 4395 } 4396 4397 return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, 4398 phy_data); 4399 } 4400 4401 static int 4402 wm_init_lcd_from_nvm(struct wm_softc *sc) 4403 { 4404 uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg; 4405 uint16_t phy_page = 0; 4406 int rv = 0; 4407 4408 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4409 device_xname(sc->sc_dev), __func__)); 4410 4411 switch (sc->sc_type) { 4412 case WM_T_ICH8: 4413 if ((sc->sc_phytype == WMPHY_UNKNOWN) 4414 || (sc->sc_phytype != WMPHY_IGP_3)) 4415 return 0; 4416 4417 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT) 4418 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) { 4419 sw_cfg_mask = FEXTNVM_SW_CONFIG; 4420 break; 4421 } 4422 /* FALLTHROUGH */ 4423 case WM_T_PCH: 4424 case WM_T_PCH2: 4425 case WM_T_PCH_LPT: 4426 case WM_T_PCH_SPT: 4427 case WM_T_PCH_CNP: 4428 sw_cfg_mask 
= FEXTNVM_SW_CONFIG_ICH8M; 4429 break; 4430 default: 4431 return 0; 4432 } 4433 4434 if ((rv = sc->phy.acquire(sc)) != 0) 4435 return rv; 4436 4437 reg = CSR_READ(sc, WMREG_FEXTNVM); 4438 if ((reg & sw_cfg_mask) == 0) 4439 goto release; 4440 4441 /* 4442 * Make sure HW does not configure LCD from PHY extended configuration 4443 * before SW configuration 4444 */ 4445 extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR); 4446 if ((sc->sc_type < WM_T_PCH2) 4447 && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0)) 4448 goto release; 4449 4450 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n", 4451 device_xname(sc->sc_dev), __func__)); 4452 /* word_addr is in DWORD */ 4453 word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1; 4454 4455 reg = CSR_READ(sc, WMREG_EXTCNFSIZE); 4456 cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH); 4457 if (cnf_size == 0) 4458 goto release; 4459 4460 if (((sc->sc_type == WM_T_PCH) 4461 && ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0)) 4462 || (sc->sc_type > WM_T_PCH)) { 4463 /* 4464 * HW configures the SMBus address and LEDs when the OEM and 4465 * LCD Write Enable bits are set in the NVM. When both NVM bits 4466 * are cleared, SW will configure them instead. 4467 */ 4468 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n", 4469 device_xname(sc->sc_dev), __func__)); 4470 if ((rv = wm_write_smbus_addr(sc)) != 0) 4471 goto release; 4472 4473 reg = CSR_READ(sc, WMREG_LEDCTL); 4474 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, 4475 (uint16_t)reg); 4476 if (rv != 0) 4477 goto release; 4478 } 4479 4480 /* Configure LCD from extended configuration region. */ 4481 for (i = 0; i < cnf_size; i++) { 4482 uint16_t reg_data, reg_addr; 4483 4484 if (wm_nvm_read(sc, (word_addr + i * 2), 1, ®_data) != 0) 4485 goto release; 4486 4487 if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, ®_addr) !=0) 4488 goto release; 4489 4490 if (reg_addr == IGPHY_PAGE_SELECT) 4491 phy_page = reg_data; 4492 4493 reg_addr &= IGPHY_MAXREGADDR; 4494 reg_addr |= phy_page; 4495 4496 KASSERT(sc->phy.writereg_locked != NULL); 4497 rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, 4498 reg_data); 4499 } 4500 4501 release: 4502 sc->phy.release(sc); 4503 return rv; 4504 } 4505 4506 /* 4507 * wm_oem_bits_config_ich8lan - SW-based LCD Configuration 4508 * @sc: pointer to the HW structure 4509 * @d0_state: boolean if entering d0 or d3 device state 4510 * 4511 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are 4512 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit 4513 * in NVM determines whether HW should configure LPLU and Gbe Disable. 
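 * ("LPLU" stands for Low Power Link Up; "Gbe Disable" prevents the PHY
 * from negotiating gigabit speed in the corresponding power state.)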
4514 */ 4515 int 4516 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state) 4517 { 4518 uint32_t mac_reg; 4519 uint16_t oem_reg; 4520 int rv; 4521 4522 if (sc->sc_type < WM_T_PCH) 4523 return 0; 4524 4525 rv = sc->phy.acquire(sc); 4526 if (rv != 0) 4527 return rv; 4528 4529 if (sc->sc_type == WM_T_PCH) { 4530 mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR); 4531 if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0) 4532 goto release; 4533 } 4534 4535 mac_reg = CSR_READ(sc, WMREG_FEXTNVM); 4536 if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0) 4537 goto release; 4538 4539 mac_reg = CSR_READ(sc, WMREG_PHY_CTRL); 4540 4541 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg); 4542 if (rv != 0) 4543 goto release; 4544 oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); 4545 4546 if (d0_state) { 4547 if ((mac_reg & PHY_CTRL_GBE_DIS) != 0) 4548 oem_reg |= HV_OEM_BITS_A1KDIS; 4549 if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0) 4550 oem_reg |= HV_OEM_BITS_LPLU; 4551 } else { 4552 if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS)) 4553 != 0) 4554 oem_reg |= HV_OEM_BITS_A1KDIS; 4555 if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU)) 4556 != 0) 4557 oem_reg |= HV_OEM_BITS_LPLU; 4558 } 4559 4560 /* Set Restart auto-neg to activate the bits */ 4561 if ((d0_state || (sc->sc_type != WM_T_PCH)) 4562 && (wm_phy_resetisblocked(sc) == false)) 4563 oem_reg |= HV_OEM_BITS_ANEGNOW; 4564 4565 rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg); 4566 4567 release: 4568 sc->phy.release(sc); 4569 4570 return rv; 4571 } 4572 4573 /* Init hardware bits */ 4574 void 4575 wm_initialize_hardware_bits(struct wm_softc *sc) 4576 { 4577 uint32_t tarc0, tarc1, reg; 4578 4579 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4580 device_xname(sc->sc_dev), __func__)); 4581 4582 /* For 82571 variant, 80003 and ICHs */ 4583 if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583)) 4584 || (sc->sc_type >= WM_T_80003)) { 4585 4586 /* Transmit Descriptor Control 0 */ 4587 reg = CSR_READ(sc, WMREG_TXDCTL(0)); 4588 reg |= TXDCTL_COUNT_DESC; 4589 CSR_WRITE(sc, WMREG_TXDCTL(0), reg); 4590 4591 /* Transmit Descriptor Control 1 */ 4592 reg = CSR_READ(sc, WMREG_TXDCTL(1)); 4593 reg |= TXDCTL_COUNT_DESC; 4594 CSR_WRITE(sc, WMREG_TXDCTL(1), reg); 4595 4596 /* TARC0 */ 4597 tarc0 = CSR_READ(sc, WMREG_TARC0); 4598 switch (sc->sc_type) { 4599 case WM_T_82571: 4600 case WM_T_82572: 4601 case WM_T_82573: 4602 case WM_T_82574: 4603 case WM_T_82583: 4604 case WM_T_80003: 4605 /* Clear bits 30..27 */ 4606 tarc0 &= ~__BITS(30, 27); 4607 break; 4608 default: 4609 break; 4610 } 4611 4612 switch (sc->sc_type) { 4613 case WM_T_82571: 4614 case WM_T_82572: 4615 tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */ 4616 4617 tarc1 = CSR_READ(sc, WMREG_TARC1); 4618 tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */ 4619 tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */ 4620 /* 8257[12] Errata No.7 */ 4621 tarc1 |= __BIT(22); /* TARC1 bits 22 */ 4622 4623 /* TARC1 bit 28 */ 4624 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0) 4625 tarc1 &= ~__BIT(28); 4626 else 4627 tarc1 |= __BIT(28); 4628 CSR_WRITE(sc, WMREG_TARC1, tarc1); 4629 4630 /* 4631 * 8257[12] Errata No.13 4632 * Disable Dyamic Clock Gating. 
4633 */ 4634 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4635 reg &= ~CTRL_EXT_DMA_DYN_CLK; 4636 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4637 break; 4638 case WM_T_82573: 4639 case WM_T_82574: 4640 case WM_T_82583: 4641 if ((sc->sc_type == WM_T_82574) 4642 || (sc->sc_type == WM_T_82583)) 4643 tarc0 |= __BIT(26); /* TARC0 bit 26 */ 4644 4645 /* Extended Device Control */ 4646 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4647 reg &= ~__BIT(23); /* Clear bit 23 */ 4648 reg |= __BIT(22); /* Set bit 22 */ 4649 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4650 4651 /* Device Control */ 4652 sc->sc_ctrl &= ~__BIT(29); /* Clear bit 29 */ 4653 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 4654 4655 /* PCIe Control Register */ 4656 /* 4657 * 82573 Errata (unknown). 4658 * 4659 * 82574 Errata 25 and 82583 Errata 12 4660 * "Dropped Rx Packets": 4661 * NVM Image Version 2.1.4 and newer has no this bug. 4662 */ 4663 reg = CSR_READ(sc, WMREG_GCR); 4664 reg |= GCR_L1_ACT_WITHOUT_L0S_RX; 4665 CSR_WRITE(sc, WMREG_GCR, reg); 4666 4667 if ((sc->sc_type == WM_T_82574) 4668 || (sc->sc_type == WM_T_82583)) { 4669 /* 4670 * Document says this bit must be set for 4671 * proper operation. 4672 */ 4673 reg = CSR_READ(sc, WMREG_GCR); 4674 reg |= __BIT(22); 4675 CSR_WRITE(sc, WMREG_GCR, reg); 4676 4677 /* 4678 * Apply workaround for hardware errata 4679 * documented in errata docs Fixes issue where 4680 * some error prone or unreliable PCIe 4681 * completions are occurring, particularly 4682 * with ASPM enabled. Without fix, issue can 4683 * cause Tx timeouts. 4684 */ 4685 reg = CSR_READ(sc, WMREG_GCR2); 4686 reg |= __BIT(0); 4687 CSR_WRITE(sc, WMREG_GCR2, reg); 4688 } 4689 break; 4690 case WM_T_80003: 4691 /* TARC0 */ 4692 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER) 4693 || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) 4694 tarc0 &= ~__BIT(20); /* Clear bits 20 */ 4695 4696 /* TARC1 bit 28 */ 4697 tarc1 = CSR_READ(sc, WMREG_TARC1); 4698 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0) 4699 tarc1 &= ~__BIT(28); 4700 else 4701 tarc1 |= __BIT(28); 4702 CSR_WRITE(sc, WMREG_TARC1, tarc1); 4703 break; 4704 case WM_T_ICH8: 4705 case WM_T_ICH9: 4706 case WM_T_ICH10: 4707 case WM_T_PCH: 4708 case WM_T_PCH2: 4709 case WM_T_PCH_LPT: 4710 case WM_T_PCH_SPT: 4711 case WM_T_PCH_CNP: 4712 /* TARC0 */ 4713 if (sc->sc_type == WM_T_ICH8) { 4714 /* Set TARC0 bits 29 and 28 */ 4715 tarc0 |= __BITS(29, 28); 4716 } else if (sc->sc_type == WM_T_PCH_SPT) { 4717 tarc0 |= __BIT(29); 4718 /* 4719 * Drop bit 28. From Linux. 4720 * See I218/I219 spec update 4721 * "5. 
Buffer Overrun While the I219 is 4722 * Processing DMA Transactions" 4723 */ 4724 tarc0 &= ~__BIT(28); 4725 } 4726 /* Set TARC0 bits 23,24,26,27 */ 4727 tarc0 |= __BITS(27, 26) | __BITS(24, 23); 4728 4729 /* CTRL_EXT */ 4730 reg = CSR_READ(sc, WMREG_CTRL_EXT); 4731 reg |= __BIT(22); /* Set bit 22 */ 4732 /* 4733 * Enable PHY low-power state when MAC is at D3 4734 * w/o WoL 4735 */ 4736 if (sc->sc_type >= WM_T_PCH) 4737 reg |= CTRL_EXT_PHYPDEN; 4738 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 4739 4740 /* TARC1 */ 4741 tarc1 = CSR_READ(sc, WMREG_TARC1); 4742 /* bit 28 */ 4743 if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0) 4744 tarc1 &= ~__BIT(28); 4745 else 4746 tarc1 |= __BIT(28); 4747 tarc1 |= __BIT(24) | __BIT(26) | __BIT(30); 4748 CSR_WRITE(sc, WMREG_TARC1, tarc1); 4749 4750 /* Device Status */ 4751 if (sc->sc_type == WM_T_ICH8) { 4752 reg = CSR_READ(sc, WMREG_STATUS); 4753 reg &= ~__BIT(31); 4754 CSR_WRITE(sc, WMREG_STATUS, reg); 4755 4756 } 4757 4758 /* IOSFPC */ 4759 if (sc->sc_type == WM_T_PCH_SPT) { 4760 reg = CSR_READ(sc, WMREG_IOSFPC); 4761 reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */ 4762 CSR_WRITE(sc, WMREG_IOSFPC, reg); 4763 } 4764 /* 4765 * Work-around descriptor data corruption issue during 4766 * NFS v2 UDP traffic, just disable the NFS filtering 4767 * capability. 4768 */ 4769 reg = CSR_READ(sc, WMREG_RFCTL); 4770 reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS; 4771 CSR_WRITE(sc, WMREG_RFCTL, reg); 4772 break; 4773 default: 4774 break; 4775 } 4776 CSR_WRITE(sc, WMREG_TARC0, tarc0); 4777 4778 switch (sc->sc_type) { 4779 /* 4780 * 8257[12] Errata No.52, 82573 Errata No.43 and some others. 4781 * Avoid RSS Hash Value bug. 4782 */ 4783 case WM_T_82571: 4784 case WM_T_82572: 4785 case WM_T_82573: 4786 case WM_T_80003: 4787 case WM_T_ICH8: 4788 reg = CSR_READ(sc, WMREG_RFCTL); 4789 reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS; 4790 CSR_WRITE(sc, WMREG_RFCTL, reg); 4791 break; 4792 case WM_T_82574: 4793 /* Use extened Rx descriptor. */ 4794 reg = CSR_READ(sc, WMREG_RFCTL); 4795 reg |= WMREG_RFCTL_EXSTEN; 4796 CSR_WRITE(sc, WMREG_RFCTL, reg); 4797 break; 4798 default: 4799 break; 4800 } 4801 } else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) { 4802 /* 4803 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24, 4804 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11: 4805 * "Certain Malformed IPv6 Extension Headers are Not Processed 4806 * Correctly by the Device" 4807 * 4808 * I354(C2000) Errata AVR53: 4809 * "Malformed IPv6 Extension Headers May Result in LAN Device 4810 * Hang" 4811 */ 4812 reg = CSR_READ(sc, WMREG_RFCTL); 4813 reg |= WMREG_RFCTL_IPV6EXDIS; 4814 CSR_WRITE(sc, WMREG_RFCTL, reg); 4815 } 4816 } 4817 4818 static uint32_t 4819 wm_rxpbs_adjust_82580(uint32_t val) 4820 { 4821 uint32_t rv = 0; 4822 4823 if (val < __arraycount(wm_82580_rxpbs_table)) 4824 rv = wm_82580_rxpbs_table[val]; 4825 4826 return rv; 4827 } 4828 4829 /* 4830 * wm_reset_phy: 4831 * 4832 * generic PHY reset function. 
4833 * Same as e1000_phy_hw_reset_generic() 4834 */ 4835 static int 4836 wm_reset_phy(struct wm_softc *sc) 4837 { 4838 uint32_t reg; 4839 4840 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4841 device_xname(sc->sc_dev), __func__)); 4842 if (wm_phy_resetisblocked(sc)) 4843 return -1; 4844 4845 sc->phy.acquire(sc); 4846 4847 reg = CSR_READ(sc, WMREG_CTRL); 4848 CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET); 4849 CSR_WRITE_FLUSH(sc); 4850 4851 delay(sc->phy.reset_delay_us); 4852 4853 CSR_WRITE(sc, WMREG_CTRL, reg); 4854 CSR_WRITE_FLUSH(sc); 4855 4856 delay(150); 4857 4858 sc->phy.release(sc); 4859 4860 wm_get_cfg_done(sc); 4861 wm_phy_post_reset(sc); 4862 4863 return 0; 4864 } 4865 4866 /* 4867 * wm_flush_desc_rings - remove all descriptors from the descriptor rings. 4868 * 4869 * In i219, the descriptor rings must be emptied before resetting the HW 4870 * or before changing the device state to D3 during runtime (runtime PM). 4871 * 4872 * Failure to do this will cause the HW to enter a unit hang state which can 4873 * only be released by PCI reset on the device. 4874 * 4875 * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only. 4876 */ 4877 static void 4878 wm_flush_desc_rings(struct wm_softc *sc) 4879 { 4880 pcireg_t preg; 4881 uint32_t reg; 4882 struct wm_txqueue *txq; 4883 wiseman_txdesc_t *txd; 4884 int nexttx; 4885 uint32_t rctl; 4886 4887 /* First, disable MULR fix in FEXTNVM11 */ 4888 reg = CSR_READ(sc, WMREG_FEXTNVM11); 4889 reg |= FEXTNVM11_DIS_MULRFIX; 4890 CSR_WRITE(sc, WMREG_FEXTNVM11, reg); 4891 4892 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS); 4893 reg = CSR_READ(sc, WMREG_TDLEN(0)); 4894 if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0)) 4895 return; 4896 4897 /* 4898 * Remove all descriptors from the tx_ring. 4899 * 4900 * We want to clear all pending descriptors from the TX ring. Zeroing 4901 * happens when the HW reads the regs. We assign the ring itself as 4902 * the data of the next descriptor. We don't care about the data we are 4903 * about to reset the HW. 4904 */ 4905 #ifdef WM_DEBUG 4906 device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg); 4907 #endif 4908 reg = CSR_READ(sc, WMREG_TCTL); 4909 CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN); 4910 4911 txq = &sc->sc_queue[0].wmq_txq; 4912 nexttx = txq->txq_next; 4913 txd = &txq->txq_descs[nexttx]; 4914 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma); 4915 txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512); 4916 txd->wtx_fields.wtxu_status = 0; 4917 txd->wtx_fields.wtxu_options = 0; 4918 txd->wtx_fields.wtxu_vlan = 0; 4919 4920 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, 4921 BUS_SPACE_BARRIER_WRITE); 4922 4923 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 4924 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next); 4925 bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0, 4926 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 4927 delay(250); 4928 4929 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS); 4930 if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0) 4931 return; 4932 4933 /* 4934 * Mark all descriptors in the RX ring as consumed and disable the 4935 * rx ring. 
4936 */ 4937 #ifdef WM_DEBUG 4938 device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg); 4939 #endif 4940 rctl = CSR_READ(sc, WMREG_RCTL); 4941 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); 4942 CSR_WRITE_FLUSH(sc); 4943 delay(150); 4944 4945 reg = CSR_READ(sc, WMREG_RXDCTL(0)); 4946 /* Zero the lower 14 bits (prefetch and host thresholds) */ 4947 reg &= 0xffffc000; 4948 /* 4949 * Update thresholds: prefetch threshold to 31, host threshold 4950 * to 1 and make sure the granularity is "descriptors" and not 4951 * "cache lines" 4952 */ 4953 reg |= (0x1f | (1 << 8) | RXDCTL_GRAN); 4954 CSR_WRITE(sc, WMREG_RXDCTL(0), reg); 4955 4956 /* Momentarily enable the RX ring for the changes to take effect */ 4957 CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN); 4958 CSR_WRITE_FLUSH(sc); 4959 delay(150); 4960 CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN); 4961 } 4962 4963 /* 4964 * wm_reset: 4965 * 4966 * Reset the i82542 chip. 4967 */ 4968 static void 4969 wm_reset(struct wm_softc *sc) 4970 { 4971 int phy_reset = 0; 4972 int i, error = 0; 4973 uint32_t reg; 4974 uint16_t kmreg; 4975 int rv; 4976 4977 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 4978 device_xname(sc->sc_dev), __func__)); 4979 KASSERT(sc->sc_type != 0); 4980 4981 /* 4982 * Allocate on-chip memory according to the MTU size. 4983 * The Packet Buffer Allocation register must be written 4984 * before the chip is reset. 4985 */ 4986 switch (sc->sc_type) { 4987 case WM_T_82547: 4988 case WM_T_82547_2: 4989 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 4990 PBA_22K : PBA_30K; 4991 for (i = 0; i < sc->sc_nqueues; i++) { 4992 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 4993 txq->txq_fifo_head = 0; 4994 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT; 4995 txq->txq_fifo_size = 4996 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT; 4997 txq->txq_fifo_stall = 0; 4998 } 4999 break; 5000 case WM_T_82571: 5001 case WM_T_82572: 5002 case WM_T_82575: /* XXX need special handing for jumbo frames */ 5003 case WM_T_80003: 5004 sc->sc_pba = PBA_32K; 5005 break; 5006 case WM_T_82573: 5007 sc->sc_pba = PBA_12K; 5008 break; 5009 case WM_T_82574: 5010 case WM_T_82583: 5011 sc->sc_pba = PBA_20K; 5012 break; 5013 case WM_T_82576: 5014 sc->sc_pba = CSR_READ(sc, WMREG_RXPBS); 5015 sc->sc_pba &= RXPBS_SIZE_MASK_82576; 5016 break; 5017 case WM_T_82580: 5018 case WM_T_I350: 5019 case WM_T_I354: 5020 sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS)); 5021 break; 5022 case WM_T_I210: 5023 case WM_T_I211: 5024 sc->sc_pba = PBA_34K; 5025 break; 5026 case WM_T_ICH8: 5027 /* Workaround for a bit corruption issue in FIFO memory */ 5028 sc->sc_pba = PBA_8K; 5029 CSR_WRITE(sc, WMREG_PBS, PBA_16K); 5030 break; 5031 case WM_T_ICH9: 5032 case WM_T_ICH10: 5033 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ? 5034 PBA_14K : PBA_10K; 5035 break; 5036 case WM_T_PCH: 5037 case WM_T_PCH2: /* XXX 14K? */ 5038 case WM_T_PCH_LPT: 5039 case WM_T_PCH_SPT: 5040 case WM_T_PCH_CNP: 5041 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ? 5042 PBA_12K : PBA_26K; 5043 break; 5044 default: 5045 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ? 5046 PBA_40K : PBA_48K; 5047 break; 5048 } 5049 /* 5050 * Only old or non-multiqueue devices have the PBA register 5051 * XXX Need special handling for 82575. 
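	 * The PBA value written above is the receive packet buffer size
	 * in KB; the remainder of the on-chip buffer is used for transmit
	 * (see the 82547 FIFO setup above, which derives txq_fifo_size
	 * from PBA_40K - sc_pba).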
5052 */ 5053 if (((sc->sc_flags & WM_F_NEWQUEUE) == 0) 5054 || (sc->sc_type == WM_T_82575)) 5055 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba); 5056 5057 /* Prevent the PCI-E bus from sticking */ 5058 if (sc->sc_flags & WM_F_PCIE) { 5059 int timeout = 800; 5060 5061 sc->sc_ctrl |= CTRL_GIO_M_DIS; 5062 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 5063 5064 while (timeout--) { 5065 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) 5066 == 0) 5067 break; 5068 delay(100); 5069 } 5070 if (timeout == 0) 5071 device_printf(sc->sc_dev, 5072 "failed to disable busmastering\n"); 5073 } 5074 5075 /* Set the completion timeout for interface */ 5076 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 5077 || (sc->sc_type == WM_T_82580) 5078 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 5079 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) 5080 wm_set_pcie_completion_timeout(sc); 5081 5082 /* Clear interrupt */ 5083 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 5084 if (wm_is_using_msix(sc)) { 5085 if (sc->sc_type != WM_T_82574) { 5086 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); 5087 CSR_WRITE(sc, WMREG_EIAC, 0); 5088 } else 5089 CSR_WRITE(sc, WMREG_EIAC_82574, 0); 5090 } 5091 5092 /* Stop the transmit and receive processes. */ 5093 CSR_WRITE(sc, WMREG_RCTL, 0); 5094 sc->sc_rctl &= ~RCTL_EN; 5095 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP); 5096 CSR_WRITE_FLUSH(sc); 5097 5098 /* XXX set_tbi_sbp_82543() */ 5099 5100 delay(10*1000); 5101 5102 /* Must acquire the MDIO ownership before MAC reset */ 5103 switch (sc->sc_type) { 5104 case WM_T_82573: 5105 case WM_T_82574: 5106 case WM_T_82583: 5107 error = wm_get_hw_semaphore_82573(sc); 5108 break; 5109 default: 5110 break; 5111 } 5112 5113 /* 5114 * 82541 Errata 29? & 82547 Errata 28? 5115 * See also the description about PHY_RST bit in CTRL register 5116 * in 8254x_GBe_SDM.pdf. 5117 */ 5118 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) { 5119 CSR_WRITE(sc, WMREG_CTRL, 5120 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET); 5121 CSR_WRITE_FLUSH(sc); 5122 delay(5000); 5123 } 5124 5125 switch (sc->sc_type) { 5126 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */ 5127 case WM_T_82541: 5128 case WM_T_82541_2: 5129 case WM_T_82547: 5130 case WM_T_82547_2: 5131 /* 5132 * On some chipsets, a reset through a memory-mapped write 5133 * cycle can cause the chip to reset before completing the 5134 * write cycle. This causes major headache that can be avoided 5135 * by issuing the reset via indirect register writes through 5136 * I/O space. 5137 * 5138 * So, if we successfully mapped the I/O BAR at attach time, 5139 * use that. Otherwise, try our luck with a memory-mapped 5140 * reset. 5141 */ 5142 if (sc->sc_flags & WM_F_IOH_VALID) 5143 wm_io_write(sc, WMREG_CTRL, CTRL_RST); 5144 else 5145 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST); 5146 break; 5147 case WM_T_82545_3: 5148 case WM_T_82546_3: 5149 /* Use the shadow control register on these chips. 
*/ 5150 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST); 5151 break; 5152 case WM_T_80003: 5153 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 5154 sc->phy.acquire(sc); 5155 CSR_WRITE(sc, WMREG_CTRL, reg); 5156 sc->phy.release(sc); 5157 break; 5158 case WM_T_ICH8: 5159 case WM_T_ICH9: 5160 case WM_T_ICH10: 5161 case WM_T_PCH: 5162 case WM_T_PCH2: 5163 case WM_T_PCH_LPT: 5164 case WM_T_PCH_SPT: 5165 case WM_T_PCH_CNP: 5166 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST; 5167 if (wm_phy_resetisblocked(sc) == false) { 5168 /* 5169 * Gate automatic PHY configuration by hardware on 5170 * non-managed 82579 5171 */ 5172 if ((sc->sc_type == WM_T_PCH2) 5173 && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) 5174 == 0)) 5175 wm_gate_hw_phy_config_ich8lan(sc, true); 5176 5177 reg |= CTRL_PHY_RESET; 5178 phy_reset = 1; 5179 } else 5180 device_printf(sc->sc_dev, "XXX reset is blocked!!!\n"); 5181 sc->phy.acquire(sc); 5182 CSR_WRITE(sc, WMREG_CTRL, reg); 5183 /* Don't insert a completion barrier when reset */ 5184 delay(20*1000); 5185 mutex_exit(sc->sc_ich_phymtx); 5186 break; 5187 case WM_T_82580: 5188 case WM_T_I350: 5189 case WM_T_I354: 5190 case WM_T_I210: 5191 case WM_T_I211: 5192 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 5193 if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII) 5194 CSR_WRITE_FLUSH(sc); 5195 delay(5000); 5196 break; 5197 case WM_T_82542_2_0: 5198 case WM_T_82542_2_1: 5199 case WM_T_82543: 5200 case WM_T_82540: 5201 case WM_T_82545: 5202 case WM_T_82546: 5203 case WM_T_82571: 5204 case WM_T_82572: 5205 case WM_T_82573: 5206 case WM_T_82574: 5207 case WM_T_82575: 5208 case WM_T_82576: 5209 case WM_T_82583: 5210 default: 5211 /* Everything else can safely use the documented method. */ 5212 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST); 5213 break; 5214 } 5215 5216 /* Must release the MDIO ownership after MAC reset */ 5217 switch (sc->sc_type) { 5218 case WM_T_82573: 5219 case WM_T_82574: 5220 case WM_T_82583: 5221 if (error == 0) 5222 wm_put_hw_semaphore_82573(sc); 5223 break; 5224 default: 5225 break; 5226 } 5227 5228 /* Set Phy Config Counter to 50msec */ 5229 if (sc->sc_type == WM_T_PCH2) { 5230 reg = CSR_READ(sc, WMREG_FEXTNVM3); 5231 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK; 5232 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS; 5233 CSR_WRITE(sc, WMREG_FEXTNVM3, reg); 5234 } 5235 5236 if (phy_reset != 0) 5237 wm_get_cfg_done(sc); 5238 5239 /* Reload EEPROM */ 5240 switch (sc->sc_type) { 5241 case WM_T_82542_2_0: 5242 case WM_T_82542_2_1: 5243 case WM_T_82543: 5244 case WM_T_82544: 5245 delay(10); 5246 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 5247 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 5248 CSR_WRITE_FLUSH(sc); 5249 delay(2000); 5250 break; 5251 case WM_T_82540: 5252 case WM_T_82545: 5253 case WM_T_82545_3: 5254 case WM_T_82546: 5255 case WM_T_82546_3: 5256 delay(5*1000); 5257 /* XXX Disable HW ARPs on ASF enabled adapters */ 5258 break; 5259 case WM_T_82541: 5260 case WM_T_82541_2: 5261 case WM_T_82547: 5262 case WM_T_82547_2: 5263 delay(20000); 5264 /* XXX Disable HW ARPs on ASF enabled adapters */ 5265 break; 5266 case WM_T_82571: 5267 case WM_T_82572: 5268 case WM_T_82573: 5269 case WM_T_82574: 5270 case WM_T_82583: 5271 if (sc->sc_flags & WM_F_EEPROM_FLASH) { 5272 delay(10); 5273 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST; 5274 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 5275 CSR_WRITE_FLUSH(sc); 5276 } 5277 /* check EECD_EE_AUTORD */ 5278 wm_get_auto_rd_done(sc); 5279 /* 5280 * Phy configuration from NVM just starts after EECD_AUTO_RD 5281 * is set. 
5282 */ 5283 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574) 5284 || (sc->sc_type == WM_T_82583)) 5285 delay(25*1000); 5286 break; 5287 case WM_T_82575: 5288 case WM_T_82576: 5289 case WM_T_82580: 5290 case WM_T_I350: 5291 case WM_T_I354: 5292 case WM_T_I210: 5293 case WM_T_I211: 5294 case WM_T_80003: 5295 /* check EECD_EE_AUTORD */ 5296 wm_get_auto_rd_done(sc); 5297 break; 5298 case WM_T_ICH8: 5299 case WM_T_ICH9: 5300 case WM_T_ICH10: 5301 case WM_T_PCH: 5302 case WM_T_PCH2: 5303 case WM_T_PCH_LPT: 5304 case WM_T_PCH_SPT: 5305 case WM_T_PCH_CNP: 5306 break; 5307 default: 5308 panic("%s: unknown type\n", __func__); 5309 } 5310 5311 /* Check whether EEPROM is present or not */ 5312 switch (sc->sc_type) { 5313 case WM_T_82575: 5314 case WM_T_82576: 5315 case WM_T_82580: 5316 case WM_T_I350: 5317 case WM_T_I354: 5318 case WM_T_ICH8: 5319 case WM_T_ICH9: 5320 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) { 5321 /* Not found */ 5322 sc->sc_flags |= WM_F_EEPROM_INVALID; 5323 if (sc->sc_type == WM_T_82575) 5324 wm_reset_init_script_82575(sc); 5325 } 5326 break; 5327 default: 5328 break; 5329 } 5330 5331 if (phy_reset != 0) 5332 wm_phy_post_reset(sc); 5333 5334 if ((sc->sc_type == WM_T_82580) 5335 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) { 5336 /* Clear global device reset status bit */ 5337 CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET); 5338 } 5339 5340 /* Clear any pending interrupt events. */ 5341 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 5342 reg = CSR_READ(sc, WMREG_ICR); 5343 if (wm_is_using_msix(sc)) { 5344 if (sc->sc_type != WM_T_82574) { 5345 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); 5346 CSR_WRITE(sc, WMREG_EIAC, 0); 5347 } else 5348 CSR_WRITE(sc, WMREG_EIAC_82574, 0); 5349 } 5350 5351 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 5352 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 5353 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 5354 || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){ 5355 reg = CSR_READ(sc, WMREG_KABGTXD); 5356 reg |= KABGTXD_BGSQLBIAS; 5357 CSR_WRITE(sc, WMREG_KABGTXD, reg); 5358 } 5359 5360 /* Reload sc_ctrl */ 5361 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 5362 5363 wm_set_eee(sc); 5364 5365 /* 5366 * For PCH, this write will make sure that any noise will be detected 5367 * as a CRC error and be dropped rather than show up as a bad packet 5368 * to the DMA engine 5369 */ 5370 if (sc->sc_type == WM_T_PCH) 5371 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565); 5372 5373 if (sc->sc_type >= WM_T_82544) 5374 CSR_WRITE(sc, WMREG_WUC, 0); 5375 5376 if (sc->sc_type < WM_T_82575) 5377 wm_disable_aspm(sc); /* Workaround for some chips */ 5378 5379 wm_reset_mdicnfg_82580(sc); 5380 5381 if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0) 5382 wm_pll_workaround_i210(sc); 5383 5384 if (sc->sc_type == WM_T_80003) { 5385 /* Default to TRUE to enable the MDIC W/A */ 5386 sc->sc_flags |= WM_F_80003_MDIC_WA; 5387 5388 rv = wm_kmrn_readreg(sc, 5389 KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg); 5390 if (rv == 0) { 5391 if ((kmreg & KUMCTRLSTA_OPMODE_MASK) 5392 == KUMCTRLSTA_OPMODE_INBAND_MDIO) 5393 sc->sc_flags &= ~WM_F_80003_MDIC_WA; 5394 else 5395 sc->sc_flags |= WM_F_80003_MDIC_WA; 5396 } 5397 } 5398 } 5399 5400 /* 5401 * wm_add_rxbuf: 5402 * 5403 * Add a receive buffer to the indiciated descriptor. 
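 * Allocates a cluster mbuf, loads it into the slot's DMA map and
 * refreshes the receive descriptor; returns ENOBUFS when no mbuf or
 * cluster is available.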
5404 */ 5405 static int 5406 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx) 5407 { 5408 struct wm_softc *sc = rxq->rxq_sc; 5409 struct wm_rxsoft *rxs = &rxq->rxq_soft[idx]; 5410 struct mbuf *m; 5411 int error; 5412 5413 KASSERT(mutex_owned(rxq->rxq_lock)); 5414 5415 MGETHDR(m, M_DONTWAIT, MT_DATA); 5416 if (m == NULL) 5417 return ENOBUFS; 5418 5419 MCLGET(m, M_DONTWAIT); 5420 if ((m->m_flags & M_EXT) == 0) { 5421 m_freem(m); 5422 return ENOBUFS; 5423 } 5424 5425 if (rxs->rxs_mbuf != NULL) 5426 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 5427 5428 rxs->rxs_mbuf = m; 5429 5430 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 5431 /* 5432 * Cannot use bus_dmamap_load_mbuf() here because m_data may be 5433 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync(). 5434 */ 5435 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf, 5436 m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT); 5437 if (error) { 5438 /* XXX XXX XXX */ 5439 aprint_error_dev(sc->sc_dev, 5440 "unable to load rx DMA map %d, error = %d\n", idx, error); 5441 panic("wm_add_rxbuf"); 5442 } 5443 5444 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 5445 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 5446 5447 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 5448 if ((sc->sc_rctl & RCTL_EN) != 0) 5449 wm_init_rxdesc(rxq, idx); 5450 } else 5451 wm_init_rxdesc(rxq, idx); 5452 5453 return 0; 5454 } 5455 5456 /* 5457 * wm_rxdrain: 5458 * 5459 * Drain the receive queue. 5460 */ 5461 static void 5462 wm_rxdrain(struct wm_rxqueue *rxq) 5463 { 5464 struct wm_softc *sc = rxq->rxq_sc; 5465 struct wm_rxsoft *rxs; 5466 int i; 5467 5468 KASSERT(mutex_owned(rxq->rxq_lock)); 5469 5470 for (i = 0; i < WM_NRXDESC; i++) { 5471 rxs = &rxq->rxq_soft[i]; 5472 if (rxs->rxs_mbuf != NULL) { 5473 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 5474 m_freem(rxs->rxs_mbuf); 5475 rxs->rxs_mbuf = NULL; 5476 } 5477 } 5478 } 5479 5480 /* 5481 * Setup registers for RSS. 5482 * 5483 * XXX not yet VMDq support 5484 */ 5485 static void 5486 wm_init_rss(struct wm_softc *sc) 5487 { 5488 uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS]; 5489 int i; 5490 5491 CTASSERT(sizeof(rss_key) == RSS_KEYSIZE); 5492 5493 for (i = 0; i < RETA_NUM_ENTRIES; i++) { 5494 unsigned int qid, reta_ent; 5495 5496 qid = i % sc->sc_nqueues; 5497 switch (sc->sc_type) { 5498 case WM_T_82574: 5499 reta_ent = __SHIFTIN(qid, 5500 RETA_ENT_QINDEX_MASK_82574); 5501 break; 5502 case WM_T_82575: 5503 reta_ent = __SHIFTIN(qid, 5504 RETA_ENT_QINDEX1_MASK_82575); 5505 break; 5506 default: 5507 reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK); 5508 break; 5509 } 5510 5511 reta_reg = CSR_READ(sc, WMREG_RETA_Q(i)); 5512 reta_reg &= ~RETA_ENTRY_MASK_Q(i); 5513 reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i)); 5514 CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg); 5515 } 5516 5517 rss_getkey((uint8_t *)rss_key); 5518 for (i = 0; i < RSSRK_NUM_REGS; i++) 5519 CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]); 5520 5521 if (sc->sc_type == WM_T_82574) 5522 mrqc = MRQC_ENABLE_RSS_MQ_82574; 5523 else 5524 mrqc = MRQC_ENABLE_RSS_MQ; 5525 5526 /* 5527 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata. 5528 * See IPV6EXDIS bit in wm_initialize_hardware_bits(). 
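 * (The UDP hash fields below are also left disabled, under #if 0.)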
 */
5530 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5531 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5532 #if 0
5533 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5534 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5535 #endif
5536 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5537 
5538 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5539 }
5540 
5541 /*
5542  * Adjust the TX and RX queue numbers which the system actually uses.
5543  *
5544  * The numbers are affected by the following parameters:
5545  * - The number of hardware queues
5546  * - The number of MSI-X vectors (= "nvectors" argument)
5547  * - ncpu
5548  */
5549 static void
5550 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5551 {
5552 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5553 
5554 	if (nvectors < 2) {
5555 		sc->sc_nqueues = 1;
5556 		return;
5557 	}
5558 
5559 	switch (sc->sc_type) {
5560 	case WM_T_82572:
5561 		hw_ntxqueues = 2;
5562 		hw_nrxqueues = 2;
5563 		break;
5564 	case WM_T_82574:
5565 		hw_ntxqueues = 2;
5566 		hw_nrxqueues = 2;
5567 		break;
5568 	case WM_T_82575:
5569 		hw_ntxqueues = 4;
5570 		hw_nrxqueues = 4;
5571 		break;
5572 	case WM_T_82576:
5573 		hw_ntxqueues = 16;
5574 		hw_nrxqueues = 16;
5575 		break;
5576 	case WM_T_82580:
5577 	case WM_T_I350:
5578 	case WM_T_I354:
5579 		hw_ntxqueues = 8;
5580 		hw_nrxqueues = 8;
5581 		break;
5582 	case WM_T_I210:
5583 		hw_ntxqueues = 4;
5584 		hw_nrxqueues = 4;
5585 		break;
5586 	case WM_T_I211:
5587 		hw_ntxqueues = 2;
5588 		hw_nrxqueues = 2;
5589 		break;
5590 	/*
5591 	 * The following ethernet controllers do not support MSI-X, so
5592 	 * this driver does not use multiqueue on them:
5593 	 * - WM_T_80003
5594 	 * - WM_T_ICH8
5595 	 * - WM_T_ICH9
5596 	 * - WM_T_ICH10
5597 	 * - WM_T_PCH
5598 	 * - WM_T_PCH2
5599 	 * - WM_T_PCH_LPT
5600 	 */
5601 	default:
5602 		hw_ntxqueues = 1;
5603 		hw_nrxqueues = 1;
5604 		break;
5605 	}
5606 
5607 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5608 
5609 	/*
5610 	 * More queues than MSI-X vectors cannot improve scaling, so limit
5611 	 * the number of queues actually used.
5612 	 */
5613 	if (nvectors < hw_nqueues + 1)
5614 		sc->sc_nqueues = nvectors - 1;
5615 	else
5616 		sc->sc_nqueues = hw_nqueues;
5617 
5618 	/*
5619 	 * More queues than CPUs cannot improve scaling, so limit
5620 	 * the number of queues actually used.
5621 	 */
5622 	if (ncpu < sc->sc_nqueues)
5623 		sc->sc_nqueues = ncpu;
5624 }
5625 
5626 static inline bool
5627 wm_is_using_msix(struct wm_softc *sc)
5628 {
5629 
5630 	return (sc->sc_nintrs > 1);
5631 }
5632 
5633 static inline bool
5634 wm_is_using_multiqueue(struct wm_softc *sc)
5635 {
5636 
5637 	return (sc->sc_nqueues > 1);
5638 }
5639 
5640 static int
5641 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
5642 {
5643 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5644 
5645 	wmq->wmq_id = qidx;
5646 	wmq->wmq_intr_idx = intr_idx;
5647 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
5648 	    wm_handle_queue, wmq);
5649 	if (wmq->wmq_si != NULL)
5650 		return 0;
5651 
5652 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5653 	    wmq->wmq_id);
5654 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5655 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5656 	return ENOMEM;
5657 }
5658 
5659 /*
5660  * Both single-interrupt MSI and INTx can use this function.
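 * (The MSI-X case is handled by wm_setup_msix() below.)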
5661 */ 5662 static int 5663 wm_setup_legacy(struct wm_softc *sc) 5664 { 5665 pci_chipset_tag_t pc = sc->sc_pc; 5666 const char *intrstr = NULL; 5667 char intrbuf[PCI_INTRSTR_LEN]; 5668 int error; 5669 5670 error = wm_alloc_txrx_queues(sc); 5671 if (error) { 5672 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n", 5673 error); 5674 return ENOMEM; 5675 } 5676 intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf, 5677 sizeof(intrbuf)); 5678 #ifdef WM_MPSAFE 5679 pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true); 5680 #endif 5681 sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0], 5682 IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev)); 5683 if (sc->sc_ihs[0] == NULL) { 5684 aprint_error_dev(sc->sc_dev,"unable to establish %s\n", 5685 (pci_intr_type(pc, sc->sc_intrs[0]) 5686 == PCI_INTR_TYPE_MSI) ? "MSI" : "INTx"); 5687 return ENOMEM; 5688 } 5689 5690 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 5691 sc->sc_nintrs = 1; 5692 5693 return wm_softint_establish_queue(sc, 0, 0); 5694 } 5695 5696 static int 5697 wm_setup_msix(struct wm_softc *sc) 5698 { 5699 void *vih; 5700 kcpuset_t *affinity; 5701 int qidx, error, intr_idx, txrx_established; 5702 pci_chipset_tag_t pc = sc->sc_pc; 5703 const char *intrstr = NULL; 5704 char intrbuf[PCI_INTRSTR_LEN]; 5705 char intr_xname[INTRDEVNAMEBUF]; 5706 5707 if (sc->sc_nqueues < ncpu) { 5708 /* 5709 * To avoid other devices' interrupts, the affinity of Tx/Rx 5710 * interrupts start from CPU#1. 5711 */ 5712 sc->sc_affinity_offset = 1; 5713 } else { 5714 /* 5715 * In this case, this device use all CPUs. So, we unify 5716 * affinitied cpu_index to msix vector number for readability. 5717 */ 5718 sc->sc_affinity_offset = 0; 5719 } 5720 5721 error = wm_alloc_txrx_queues(sc); 5722 if (error) { 5723 aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n", 5724 error); 5725 return ENOMEM; 5726 } 5727 5728 kcpuset_create(&affinity, false); 5729 intr_idx = 0; 5730 5731 /* 5732 * TX and RX 5733 */ 5734 txrx_established = 0; 5735 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 5736 struct wm_queue *wmq = &sc->sc_queue[qidx]; 5737 int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu; 5738 5739 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, 5740 sizeof(intrbuf)); 5741 #ifdef WM_MPSAFE 5742 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], 5743 PCI_INTR_MPSAFE, true); 5744 #endif 5745 memset(intr_xname, 0, sizeof(intr_xname)); 5746 snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d", 5747 device_xname(sc->sc_dev), qidx); 5748 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], 5749 IPL_NET, wm_txrxintr_msix, wmq, intr_xname); 5750 if (vih == NULL) { 5751 aprint_error_dev(sc->sc_dev, 5752 "unable to establish MSI-X(for TX and RX)%s%s\n", 5753 intrstr ? " at " : "", 5754 intrstr ? 
intrstr : ""); 5755 5756 goto fail; 5757 } 5758 kcpuset_zero(affinity); 5759 /* Round-robin affinity */ 5760 kcpuset_set(affinity, affinity_to); 5761 error = interrupt_distribute(vih, affinity, NULL); 5762 if (error == 0) { 5763 aprint_normal_dev(sc->sc_dev, 5764 "for TX and RX interrupting at %s affinity to %u\n", 5765 intrstr, affinity_to); 5766 } else { 5767 aprint_normal_dev(sc->sc_dev, 5768 "for TX and RX interrupting at %s\n", intrstr); 5769 } 5770 sc->sc_ihs[intr_idx] = vih; 5771 if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0) 5772 goto fail; 5773 txrx_established++; 5774 intr_idx++; 5775 } 5776 5777 /* LINK */ 5778 intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf, 5779 sizeof(intrbuf)); 5780 #ifdef WM_MPSAFE 5781 pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true); 5782 #endif 5783 memset(intr_xname, 0, sizeof(intr_xname)); 5784 snprintf(intr_xname, sizeof(intr_xname), "%sLINK", 5785 device_xname(sc->sc_dev)); 5786 vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx], 5787 IPL_NET, wm_linkintr_msix, sc, intr_xname); 5788 if (vih == NULL) { 5789 aprint_error_dev(sc->sc_dev, 5790 "unable to establish MSI-X(for LINK)%s%s\n", 5791 intrstr ? " at " : "", 5792 intrstr ? intrstr : ""); 5793 5794 goto fail; 5795 } 5796 /* Keep default affinity to LINK interrupt */ 5797 aprint_normal_dev(sc->sc_dev, 5798 "for LINK interrupting at %s\n", intrstr); 5799 sc->sc_ihs[intr_idx] = vih; 5800 sc->sc_link_intr_idx = intr_idx; 5801 5802 sc->sc_nintrs = sc->sc_nqueues + 1; 5803 kcpuset_destroy(affinity); 5804 return 0; 5805 5806 fail: 5807 for (qidx = 0; qidx < txrx_established; qidx++) { 5808 struct wm_queue *wmq = &sc->sc_queue[qidx]; 5809 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]); 5810 sc->sc_ihs[wmq->wmq_intr_idx] = NULL; 5811 } 5812 5813 kcpuset_destroy(affinity); 5814 return ENOMEM; 5815 } 5816 5817 static void 5818 wm_unset_stopping_flags(struct wm_softc *sc) 5819 { 5820 int i; 5821 5822 KASSERT(WM_CORE_LOCKED(sc)); 5823 5824 /* Must unset stopping flags in ascending order. */ 5825 for (i = 0; i < sc->sc_nqueues; i++) { 5826 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 5827 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 5828 5829 mutex_enter(txq->txq_lock); 5830 txq->txq_stopping = false; 5831 mutex_exit(txq->txq_lock); 5832 5833 mutex_enter(rxq->rxq_lock); 5834 rxq->rxq_stopping = false; 5835 mutex_exit(rxq->rxq_lock); 5836 } 5837 5838 sc->sc_core_stopping = false; 5839 } 5840 5841 static void 5842 wm_set_stopping_flags(struct wm_softc *sc) 5843 { 5844 int i; 5845 5846 KASSERT(WM_CORE_LOCKED(sc)); 5847 5848 sc->sc_core_stopping = true; 5849 5850 /* Must set stopping flags in ascending order. */ 5851 for (i = 0; i < sc->sc_nqueues; i++) { 5852 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 5853 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 5854 5855 mutex_enter(rxq->rxq_lock); 5856 rxq->rxq_stopping = true; 5857 mutex_exit(rxq->rxq_lock); 5858 5859 mutex_enter(txq->txq_lock); 5860 txq->txq_stopping = true; 5861 mutex_exit(txq->txq_lock); 5862 } 5863 } 5864 5865 /* 5866 * Write interrupt interval value to ITR or EITR 5867 */ 5868 static void 5869 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq) 5870 { 5871 5872 if (!wmq->wmq_set_itr) 5873 return; 5874 5875 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 5876 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK); 5877 5878 /* 5879 * 82575 doesn't have CNT_INGR field. 5880 * So, overwrite counter field by software. 
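 * (On the chips that do have it, EITR_CNT_INGR presumably tells the
 * hardware not to clobber the running counter when the interval is
 * rewritten.)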
5881 		 */
5882 		if (sc->sc_type == WM_T_82575)
5883 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5884 		else
5885 			eitr |= EITR_CNT_INGR;
5886 
5887 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5888 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5889 		/*
5890 		 * 82574 has both ITR and EITR. Set EITR when we use
5891 		 * the multiqueue function with MSI-X.
5892 		 */
5893 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5894 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5895 	} else {
5896 		KASSERT(wmq->wmq_id == 0);
5897 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5898 	}
5899 
5900 	wmq->wmq_set_itr = false;
5901 }
5902 
5903 /*
5904  * TODO
5905  * The dynamic ITR calculation below is almost the same as the Linux igb
5906  * driver's, but it does not fit wm(4) well. Keep AIM disabled until we
5907  * find an appropriate ITR calculation.
5908  */
5909 /*
5910  * Calculate the interrupt interval value that wm_itrs_writereg() will
5911  * write to the ITR/EITR register. This function itself does not write it.
5912  */
5913 static void
5914 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5915 {
5916 #ifdef NOTYET
5917 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5918 	struct wm_txqueue *txq = &wmq->wmq_txq;
5919 	uint32_t avg_size = 0;
5920 	uint32_t new_itr;
5921 
5922 	if (rxq->rxq_packets)
5923 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
5924 	if (txq->txq_packets)
5925 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5926 
5927 	if (avg_size == 0) {
5928 		new_itr = 450; /* restore default value */
5929 		goto out;
5930 	}
5931 
5932 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5933 	avg_size += 24;
5934 
5935 	/* Don't starve jumbo frames */
5936 	avg_size = uimin(avg_size, 3000);
5937 
5938 	/* Give a little boost to mid-size frames */
5939 	if ((avg_size > 300) && (avg_size < 1200))
5940 		new_itr = avg_size / 3;
5941 	else
5942 		new_itr = avg_size / 2;
5943 
5944 out:
5945 	/*
5946 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
5947 	 * controllers. See the sc->sc_itr_init setting in wm_init_locked().
5948 */ 5949 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575) 5950 new_itr *= 4; 5951 5952 if (new_itr != wmq->wmq_itr) { 5953 wmq->wmq_itr = new_itr; 5954 wmq->wmq_set_itr = true; 5955 } else 5956 wmq->wmq_set_itr = false; 5957 5958 rxq->rxq_packets = 0; 5959 rxq->rxq_bytes = 0; 5960 txq->txq_packets = 0; 5961 txq->txq_bytes = 0; 5962 #endif 5963 } 5964 5965 static void 5966 wm_init_sysctls(struct wm_softc *sc) 5967 { 5968 struct sysctllog **log; 5969 const struct sysctlnode *rnode, *qnode, *cnode; 5970 int i, rv; 5971 const char *dvname; 5972 5973 log = &sc->sc_sysctllog; 5974 dvname = device_xname(sc->sc_dev); 5975 5976 rv = sysctl_createv(log, 0, NULL, &rnode, 5977 0, CTLTYPE_NODE, dvname, 5978 SYSCTL_DESCR("wm information and settings"), 5979 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 5980 if (rv != 0) 5981 goto err; 5982 5983 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 5984 CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"), 5985 NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL); 5986 if (rv != 0) 5987 goto teardown; 5988 5989 for (i = 0; i < sc->sc_nqueues; i++) { 5990 struct wm_queue *wmq = &sc->sc_queue[i]; 5991 struct wm_txqueue *txq = &wmq->wmq_txq; 5992 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 5993 5994 snprintf(sc->sc_queue[i].sysctlname, 5995 sizeof(sc->sc_queue[i].sysctlname), "q%d", i); 5996 5997 if (sysctl_createv(log, 0, &rnode, &qnode, 5998 0, CTLTYPE_NODE, 5999 sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"), 6000 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0) 6001 break; 6002 6003 if (sysctl_createv(log, 0, &qnode, &cnode, 6004 CTLFLAG_READONLY, CTLTYPE_INT, 6005 "txq_free", SYSCTL_DESCR("TX queue free"), 6006 NULL, 0, &txq->txq_free, 6007 0, CTL_CREATE, CTL_EOL) != 0) 6008 break; 6009 if (sysctl_createv(log, 0, &qnode, &cnode, 6010 CTLFLAG_READONLY, CTLTYPE_INT, 6011 "txd_head", SYSCTL_DESCR("TX descriptor head"), 6012 wm_sysctl_tdh_handler, 0, (void *)txq, 6013 0, CTL_CREATE, CTL_EOL) != 0) 6014 break; 6015 if (sysctl_createv(log, 0, &qnode, &cnode, 6016 CTLFLAG_READONLY, CTLTYPE_INT, 6017 "txd_tail", SYSCTL_DESCR("TX descriptor tail"), 6018 wm_sysctl_tdt_handler, 0, (void *)txq, 6019 0, CTL_CREATE, CTL_EOL) != 0) 6020 break; 6021 if (sysctl_createv(log, 0, &qnode, &cnode, 6022 CTLFLAG_READONLY, CTLTYPE_INT, 6023 "txq_next", SYSCTL_DESCR("TX queue next"), 6024 NULL, 0, &txq->txq_next, 6025 0, CTL_CREATE, CTL_EOL) != 0) 6026 break; 6027 if (sysctl_createv(log, 0, &qnode, &cnode, 6028 CTLFLAG_READONLY, CTLTYPE_INT, 6029 "txq_sfree", SYSCTL_DESCR("TX queue sfree"), 6030 NULL, 0, &txq->txq_sfree, 6031 0, CTL_CREATE, CTL_EOL) != 0) 6032 break; 6033 if (sysctl_createv(log, 0, &qnode, &cnode, 6034 CTLFLAG_READONLY, CTLTYPE_INT, 6035 "txq_snext", SYSCTL_DESCR("TX queue snext"), 6036 NULL, 0, &txq->txq_snext, 6037 0, CTL_CREATE, CTL_EOL) != 0) 6038 break; 6039 if (sysctl_createv(log, 0, &qnode, &cnode, 6040 CTLFLAG_READONLY, CTLTYPE_INT, 6041 "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"), 6042 NULL, 0, &txq->txq_sdirty, 6043 0, CTL_CREATE, CTL_EOL) != 0) 6044 break; 6045 if (sysctl_createv(log, 0, &qnode, &cnode, 6046 CTLFLAG_READONLY, CTLTYPE_INT, 6047 "txq_flags", SYSCTL_DESCR("TX queue flags"), 6048 NULL, 0, &txq->txq_flags, 6049 0, CTL_CREATE, CTL_EOL) != 0) 6050 break; 6051 if (sysctl_createv(log, 0, &qnode, &cnode, 6052 CTLFLAG_READONLY, CTLTYPE_BOOL, 6053 "txq_stopping", SYSCTL_DESCR("TX queue stopping"), 6054 NULL, 0, &txq->txq_stopping, 6055 0, CTL_CREATE, CTL_EOL) != 0) 6056 
break; 6057 if (sysctl_createv(log, 0, &qnode, &cnode, 6058 CTLFLAG_READONLY, CTLTYPE_BOOL, 6059 "txq_sending", SYSCTL_DESCR("TX queue sending"), 6060 NULL, 0, &txq->txq_sending, 6061 0, CTL_CREATE, CTL_EOL) != 0) 6062 break; 6063 6064 if (sysctl_createv(log, 0, &qnode, &cnode, 6065 CTLFLAG_READONLY, CTLTYPE_INT, 6066 "rxq_ptr", SYSCTL_DESCR("RX queue pointer"), 6067 NULL, 0, &rxq->rxq_ptr, 6068 0, CTL_CREATE, CTL_EOL) != 0) 6069 break; 6070 } 6071 6072 #ifdef WM_DEBUG 6073 rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE, 6074 CTLTYPE_INT, "debug_flags", 6075 SYSCTL_DESCR( 6076 "Debug flags:\n" \ 6077 "\t0x01 LINK\n" \ 6078 "\t0x02 TX\n" \ 6079 "\t0x04 RX\n" \ 6080 "\t0x08 GMII\n" \ 6081 "\t0x10 MANAGE\n" \ 6082 "\t0x20 NVM\n" \ 6083 "\t0x40 INIT\n" \ 6084 "\t0x80 LOCK"), 6085 wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL); 6086 if (rv != 0) 6087 goto teardown; 6088 #endif 6089 6090 return; 6091 6092 teardown: 6093 sysctl_teardown(log); 6094 err: 6095 sc->sc_sysctllog = NULL; 6096 device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n", 6097 __func__, rv); 6098 } 6099 6100 /* 6101 * wm_init: [ifnet interface function] 6102 * 6103 * Initialize the interface. 6104 */ 6105 static int 6106 wm_init(struct ifnet *ifp) 6107 { 6108 struct wm_softc *sc = ifp->if_softc; 6109 int ret; 6110 6111 WM_CORE_LOCK(sc); 6112 ret = wm_init_locked(ifp); 6113 WM_CORE_UNLOCK(sc); 6114 6115 return ret; 6116 } 6117 6118 static int 6119 wm_init_locked(struct ifnet *ifp) 6120 { 6121 struct wm_softc *sc = ifp->if_softc; 6122 struct ethercom *ec = &sc->sc_ethercom; 6123 int i, j, trynum, error = 0; 6124 uint32_t reg, sfp_mask = 0; 6125 6126 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 6127 device_xname(sc->sc_dev), __func__)); 6128 KASSERT(WM_CORE_LOCKED(sc)); 6129 6130 /* 6131 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set. 6132 * There is a small but measurable benefit to avoiding the adjusment 6133 * of the descriptor so that the headers are aligned, for normal mtu, 6134 * on such platforms. One possibility is that the DMA itself is 6135 * slightly more efficient if the front of the entire packet (instead 6136 * of the front of the headers) is aligned. 6137 * 6138 * Note we must always set align_tweak to 0 if we are using 6139 * jumbo frames. 6140 */ 6141 #ifdef __NO_STRICT_ALIGNMENT 6142 sc->sc_align_tweak = 0; 6143 #else 6144 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 6145 sc->sc_align_tweak = 0; 6146 else 6147 sc->sc_align_tweak = 2; 6148 #endif /* __NO_STRICT_ALIGNMENT */ 6149 6150 /* Cancel any pending I/O. */ 6151 wm_stop_locked(ifp, false, false); 6152 6153 /* Update statistics before reset */ 6154 if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC), 6155 if_ierrors, CSR_READ(sc, WMREG_RXERRC)); 6156 6157 /* >= PCH_SPT hardware workaround before reset. */ 6158 if (sc->sc_type >= WM_T_PCH_SPT) 6159 wm_flush_desc_rings(sc); 6160 6161 /* Reset the chip to a known state. */ 6162 wm_reset(sc); 6163 6164 /* 6165 * AMT based hardware can now take control from firmware 6166 * Do this after reset. 6167 */ 6168 if ((sc->sc_flags & WM_F_HAS_AMT) != 0) 6169 wm_get_hw_control(sc); 6170 6171 if ((sc->sc_type >= WM_T_PCH_SPT) && 6172 pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX) 6173 wm_legacy_irq_quirk_spt(sc); 6174 6175 /* Init hardware bits */ 6176 wm_initialize_hardware_bits(sc); 6177 6178 /* Reset the PHY. 
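 * (Only when the chip has an MII PHY, i.e. WM_F_HAS_MII is set.)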
*/ 6179 if (sc->sc_flags & WM_F_HAS_MII) 6180 wm_gmii_reset(sc); 6181 6182 if (sc->sc_type >= WM_T_ICH8) { 6183 reg = CSR_READ(sc, WMREG_GCR); 6184 /* 6185 * ICH8 No-snoop bits are opposite polarity. Set to snoop by 6186 * default after reset. 6187 */ 6188 if (sc->sc_type == WM_T_ICH8) 6189 reg |= GCR_NO_SNOOP_ALL; 6190 else 6191 reg &= ~GCR_NO_SNOOP_ALL; 6192 CSR_WRITE(sc, WMREG_GCR, reg); 6193 } 6194 6195 if ((sc->sc_type >= WM_T_ICH8) 6196 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER) 6197 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) { 6198 6199 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6200 reg |= CTRL_EXT_RO_DIS; 6201 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6202 } 6203 6204 /* Calculate (E)ITR value */ 6205 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) { 6206 /* 6207 * For NEWQUEUE's EITR (except for 82575). 6208 * 82575's EITR should be set same throttling value as other 6209 * old controllers' ITR because the interrupt/sec calculation 6210 * is the same, that is, 1,000,000,000 / (N * 256). 6211 * 6212 * 82574's EITR should be set same throttling value as ITR. 6213 * 6214 * For N interrupts/sec, set this value to: 6215 * 1,000,000 / N in contrast to ITR throttling value. 6216 */ 6217 sc->sc_itr_init = 450; 6218 } else if (sc->sc_type >= WM_T_82543) { 6219 /* 6220 * Set up the interrupt throttling register (units of 256ns) 6221 * Note that a footnote in Intel's documentation says this 6222 * ticker runs at 1/4 the rate when the chip is in 100Mbit 6223 * or 10Mbit mode. Empirically, it appears to be the case 6224 * that that is also true for the 1024ns units of the other 6225 * interrupt-related timer registers -- so, really, we ought 6226 * to divide this value by 4 when the link speed is low. 6227 * 6228 * XXX implement this division at link speed change! 6229 */ 6230 6231 /* 6232 * For N interrupts/sec, set this value to: 6233 * 1,000,000,000 / (N * 256). Note that we set the 6234 * absolute and packet timer values to this value 6235 * divided by 4 to get "simple timer" behavior. 6236 */ 6237 sc->sc_itr_init = 1500; /* 2604 ints/sec */ 6238 } 6239 6240 error = wm_init_txrx_queues(sc); 6241 if (error) 6242 goto out; 6243 6244 if (((sc->sc_flags & WM_F_SGMII) == 0) && 6245 (sc->sc_mediatype == WM_MEDIATYPE_SERDES) && 6246 (sc->sc_type >= WM_T_82575)) 6247 wm_serdes_power_up_link_82575(sc); 6248 6249 /* Clear out the VLAN table -- we don't use it (yet). */ 6250 CSR_WRITE(sc, WMREG_VET, 0); 6251 if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) 6252 trynum = 10; /* Due to hw errata */ 6253 else 6254 trynum = 1; 6255 for (i = 0; i < WM_VLAN_TABSIZE; i++) 6256 for (j = 0; j < trynum; j++) 6257 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0); 6258 6259 /* 6260 * Set up flow-control parameters. 6261 * 6262 * XXX Values could probably stand some tuning. 
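 * (FCRTH/FCRTL set the receive FIFO high/low water marks that trigger
 * XOFF/XON pause frames.)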
6263 */ 6264 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9) 6265 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH) 6266 && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT) 6267 && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){ 6268 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST); 6269 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST); 6270 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL); 6271 } 6272 6273 sc->sc_fcrtl = FCRTL_DFLT; 6274 if (sc->sc_type < WM_T_82543) { 6275 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT); 6276 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl); 6277 } else { 6278 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT); 6279 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl); 6280 } 6281 6282 if (sc->sc_type == WM_T_80003) 6283 CSR_WRITE(sc, WMREG_FCTTV, 0xffff); 6284 else 6285 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT); 6286 6287 /* Writes the control register. */ 6288 wm_set_vlan(sc); 6289 6290 if (sc->sc_flags & WM_F_HAS_MII) { 6291 uint16_t kmreg; 6292 6293 switch (sc->sc_type) { 6294 case WM_T_80003: 6295 case WM_T_ICH8: 6296 case WM_T_ICH9: 6297 case WM_T_ICH10: 6298 case WM_T_PCH: 6299 case WM_T_PCH2: 6300 case WM_T_PCH_LPT: 6301 case WM_T_PCH_SPT: 6302 case WM_T_PCH_CNP: 6303 /* 6304 * Set the mac to wait the maximum time between each 6305 * iteration and increase the max iterations when 6306 * polling the phy; this fixes erroneous timeouts at 6307 * 10Mbps. 6308 */ 6309 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 6310 0xFFFF); 6311 wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, 6312 &kmreg); 6313 kmreg |= 0x3F; 6314 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, 6315 kmreg); 6316 break; 6317 default: 6318 break; 6319 } 6320 6321 if (sc->sc_type == WM_T_80003) { 6322 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6323 reg &= ~CTRL_EXT_LINK_MODE_MASK; 6324 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6325 6326 /* Bypass RX and TX FIFO's */ 6327 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL, 6328 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 6329 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS); 6330 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, 6331 KUMCTRLSTA_INB_CTRL_DIS_PADDING | 6332 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT); 6333 } 6334 } 6335 #if 0 6336 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext); 6337 #endif 6338 6339 /* Set up checksum offload parameters. 
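 * (RXCSUM is derived from the receive checksum capabilities currently
 * enabled on the interface.)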
*/ 6340 reg = CSR_READ(sc, WMREG_RXCSUM); 6341 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL); 6342 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 6343 reg |= RXCSUM_IPOFL; 6344 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 6345 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 6346 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) 6347 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL; 6348 CSR_WRITE(sc, WMREG_RXCSUM, reg); 6349 6350 /* Set registers about MSI-X */ 6351 if (wm_is_using_msix(sc)) { 6352 uint32_t ivar, qintr_idx; 6353 struct wm_queue *wmq; 6354 unsigned int qid; 6355 6356 if (sc->sc_type == WM_T_82575) { 6357 /* Interrupt control */ 6358 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6359 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR; 6360 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6361 6362 /* TX and RX */ 6363 for (i = 0; i < sc->sc_nqueues; i++) { 6364 wmq = &sc->sc_queue[i]; 6365 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx), 6366 EITR_TX_QUEUE(wmq->wmq_id) 6367 | EITR_RX_QUEUE(wmq->wmq_id)); 6368 } 6369 /* Link status */ 6370 CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx), 6371 EITR_OTHER); 6372 } else if (sc->sc_type == WM_T_82574) { 6373 /* Interrupt control */ 6374 reg = CSR_READ(sc, WMREG_CTRL_EXT); 6375 reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME; 6376 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 6377 6378 /* 6379 * Workaround issue with spurious interrupts 6380 * in MSI-X mode. 6381 * At wm_initialize_hardware_bits(), sc_nintrs has not 6382 * initialized yet. So re-initialize WMREG_RFCTL here. 6383 */ 6384 reg = CSR_READ(sc, WMREG_RFCTL); 6385 reg |= WMREG_RFCTL_ACKDIS; 6386 CSR_WRITE(sc, WMREG_RFCTL, reg); 6387 6388 ivar = 0; 6389 /* TX and RX */ 6390 for (i = 0; i < sc->sc_nqueues; i++) { 6391 wmq = &sc->sc_queue[i]; 6392 qid = wmq->wmq_id; 6393 qintr_idx = wmq->wmq_intr_idx; 6394 6395 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), 6396 IVAR_TX_MASK_Q_82574(qid)); 6397 ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx), 6398 IVAR_RX_MASK_Q_82574(qid)); 6399 } 6400 /* Link status */ 6401 ivar |= __SHIFTIN((IVAR_VALID_82574 6402 | sc->sc_link_intr_idx), IVAR_OTHER_MASK); 6403 CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB); 6404 } else { 6405 /* Interrupt control */ 6406 CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX 6407 | GPIE_EIAME | GPIE_PBA); 6408 6409 switch (sc->sc_type) { 6410 case WM_T_82580: 6411 case WM_T_I350: 6412 case WM_T_I354: 6413 case WM_T_I210: 6414 case WM_T_I211: 6415 /* TX and RX */ 6416 for (i = 0; i < sc->sc_nqueues; i++) { 6417 wmq = &sc->sc_queue[i]; 6418 qid = wmq->wmq_id; 6419 qintr_idx = wmq->wmq_intr_idx; 6420 6421 ivar = CSR_READ(sc, WMREG_IVAR_Q(qid)); 6422 ivar &= ~IVAR_TX_MASK_Q(qid); 6423 ivar |= __SHIFTIN((qintr_idx 6424 | IVAR_VALID), 6425 IVAR_TX_MASK_Q(qid)); 6426 ivar &= ~IVAR_RX_MASK_Q(qid); 6427 ivar |= __SHIFTIN((qintr_idx 6428 | IVAR_VALID), 6429 IVAR_RX_MASK_Q(qid)); 6430 CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar); 6431 } 6432 break; 6433 case WM_T_82576: 6434 /* TX and RX */ 6435 for (i = 0; i < sc->sc_nqueues; i++) { 6436 wmq = &sc->sc_queue[i]; 6437 qid = wmq->wmq_id; 6438 qintr_idx = wmq->wmq_intr_idx; 6439 6440 ivar = CSR_READ(sc, 6441 WMREG_IVAR_Q_82576(qid)); 6442 ivar &= ~IVAR_TX_MASK_Q_82576(qid); 6443 ivar |= __SHIFTIN((qintr_idx 6444 | IVAR_VALID), 6445 IVAR_TX_MASK_Q_82576(qid)); 6446 ivar &= ~IVAR_RX_MASK_Q_82576(qid); 6447 ivar |= __SHIFTIN((qintr_idx 6448 | IVAR_VALID), 6449 IVAR_RX_MASK_Q_82576(qid)); 6450 CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), 6451 ivar); 6452 } 6453 break; 6454 default: 6455 break; 
6456 } 6457 6458 /* Link status */ 6459 ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID), 6460 IVAR_MISC_OTHER); 6461 CSR_WRITE(sc, WMREG_IVAR_MISC, ivar); 6462 } 6463 6464 if (wm_is_using_multiqueue(sc)) { 6465 wm_init_rss(sc); 6466 6467 /* 6468 ** NOTE: Receive Full-Packet Checksum Offload 6469 ** is mutually exclusive with Multiqueue. However 6470 ** this is not the same as TCP/IP checksums which 6471 ** still work. 6472 */ 6473 reg = CSR_READ(sc, WMREG_RXCSUM); 6474 reg |= RXCSUM_PCSD; 6475 CSR_WRITE(sc, WMREG_RXCSUM, reg); 6476 } 6477 } 6478 6479 /* Set up the interrupt registers. */ 6480 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 6481 6482 /* Enable SFP module insertion interrupt if it's required */ 6483 if ((sc->sc_flags & WM_F_SFP) != 0) { 6484 sc->sc_ctrl |= CTRL_EXTLINK_EN; 6485 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6486 sfp_mask = ICR_GPI(0); 6487 } 6488 6489 if (wm_is_using_msix(sc)) { 6490 uint32_t mask; 6491 struct wm_queue *wmq; 6492 6493 switch (sc->sc_type) { 6494 case WM_T_82574: 6495 mask = 0; 6496 for (i = 0; i < sc->sc_nqueues; i++) { 6497 wmq = &sc->sc_queue[i]; 6498 mask |= ICR_TXQ(wmq->wmq_id); 6499 mask |= ICR_RXQ(wmq->wmq_id); 6500 } 6501 mask |= ICR_OTHER; 6502 CSR_WRITE(sc, WMREG_EIAC_82574, mask); 6503 CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC); 6504 break; 6505 default: 6506 if (sc->sc_type == WM_T_82575) { 6507 mask = 0; 6508 for (i = 0; i < sc->sc_nqueues; i++) { 6509 wmq = &sc->sc_queue[i]; 6510 mask |= EITR_TX_QUEUE(wmq->wmq_id); 6511 mask |= EITR_RX_QUEUE(wmq->wmq_id); 6512 } 6513 mask |= EITR_OTHER; 6514 } else { 6515 mask = 0; 6516 for (i = 0; i < sc->sc_nqueues; i++) { 6517 wmq = &sc->sc_queue[i]; 6518 mask |= 1 << wmq->wmq_intr_idx; 6519 } 6520 mask |= 1 << sc->sc_link_intr_idx; 6521 } 6522 CSR_WRITE(sc, WMREG_EIAC, mask); 6523 CSR_WRITE(sc, WMREG_EIAM, mask); 6524 CSR_WRITE(sc, WMREG_EIMS, mask); 6525 6526 /* For other interrupts */ 6527 CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask); 6528 break; 6529 } 6530 } else { 6531 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 6532 ICR_RXO | ICR_RXT0 | sfp_mask; 6533 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 6534 } 6535 6536 /* Set up the inter-packet gap. */ 6537 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 6538 6539 if (sc->sc_type >= WM_T_82543) { 6540 for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) { 6541 struct wm_queue *wmq = &sc->sc_queue[qidx]; 6542 wm_itrs_writereg(sc, wmq); 6543 } 6544 /* 6545 * Link interrupts occur much less than TX 6546 * interrupts and RX interrupts. So, we don't 6547 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like 6548 * FreeBSD's if_igb. 6549 */ 6550 } 6551 6552 /* Set the VLAN ethernetype. */ 6553 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN); 6554 6555 /* 6556 * Set up the transmit control register; we start out with 6557 * a collision distance suitable for FDX, but update it whe 6558 * we resolve the media type. 6559 */ 6560 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC 6561 | TCTL_CT(TX_COLLISION_THRESHOLD) 6562 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 6563 if (sc->sc_type >= WM_T_82571) 6564 sc->sc_tctl |= TCTL_MULR; 6565 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 6566 6567 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 6568 /* Write TDT after TCTL.EN is set. See the document. */ 6569 CSR_WRITE(sc, WMREG_TDT(0), 0); 6570 } 6571 6572 if (sc->sc_type == WM_T_80003) { 6573 reg = CSR_READ(sc, WMREG_TCTL_EXT); 6574 reg &= ~TCTL_EXT_GCEX_MASK; 6575 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; 6576 CSR_WRITE(sc, WMREG_TCTL_EXT, reg); 6577 } 6578 6579 /* Set the media. 
*/ 6580 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0) 6581 goto out; 6582 6583 /* Configure for OS presence */ 6584 wm_init_manageability(sc); 6585 6586 /* 6587 * Set up the receive control register; we actually program the 6588 * register when we set the receive filter. Use multicast address 6589 * offset type 0. 6590 * 6591 * Only the i82544 has the ability to strip the incoming CRC, so we 6592 * don't enable that feature. 6593 */ 6594 sc->sc_mchash_type = 0; 6595 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF 6596 | __SHIFTIN(sc->sc_mchash_type, RCTL_MO); 6597 6598 /* 82574 use one buffer extended Rx descriptor. */ 6599 if (sc->sc_type == WM_T_82574) 6600 sc->sc_rctl |= RCTL_DTYP_ONEBUF; 6601 6602 if ((sc->sc_flags & WM_F_CRC_STRIP) != 0) 6603 sc->sc_rctl |= RCTL_SECRC; 6604 6605 if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0) 6606 && (ifp->if_mtu > ETHERMTU)) { 6607 sc->sc_rctl |= RCTL_LPE; 6608 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6609 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO); 6610 } 6611 6612 if (MCLBYTES == 2048) 6613 sc->sc_rctl |= RCTL_2k; 6614 else { 6615 if (sc->sc_type >= WM_T_82543) { 6616 switch (MCLBYTES) { 6617 case 4096: 6618 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k; 6619 break; 6620 case 8192: 6621 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k; 6622 break; 6623 case 16384: 6624 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k; 6625 break; 6626 default: 6627 panic("wm_init: MCLBYTES %d unsupported", 6628 MCLBYTES); 6629 break; 6630 } 6631 } else 6632 panic("wm_init: i82542 requires MCLBYTES = 2048"); 6633 } 6634 6635 /* Enable ECC */ 6636 switch (sc->sc_type) { 6637 case WM_T_82571: 6638 reg = CSR_READ(sc, WMREG_PBA_ECC); 6639 reg |= PBA_ECC_CORR_EN; 6640 CSR_WRITE(sc, WMREG_PBA_ECC, reg); 6641 break; 6642 case WM_T_PCH_LPT: 6643 case WM_T_PCH_SPT: 6644 case WM_T_PCH_CNP: 6645 reg = CSR_READ(sc, WMREG_PBECCSTS); 6646 reg |= PBECCSTS_UNCORR_ECC_ENABLE; 6647 CSR_WRITE(sc, WMREG_PBECCSTS, reg); 6648 6649 sc->sc_ctrl |= CTRL_MEHE; 6650 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 6651 break; 6652 default: 6653 break; 6654 } 6655 6656 /* 6657 * Set the receive filter. 6658 * 6659 * For 82575 and 82576, the RX descriptors must be initialized after 6660 * the setting of RCTL.EN in wm_set_filter() 6661 */ 6662 wm_set_filter(sc); 6663 6664 /* On 575 and later set RDT only if RX enabled */ 6665 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 6666 int qidx; 6667 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 6668 struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq; 6669 for (i = 0; i < WM_NRXDESC; i++) { 6670 mutex_enter(rxq->rxq_lock); 6671 wm_init_rxdesc(rxq, i); 6672 mutex_exit(rxq->rxq_lock); 6673 6674 } 6675 } 6676 } 6677 6678 wm_unset_stopping_flags(sc); 6679 6680 /* Start the one second link check clock. */ 6681 callout_schedule(&sc->sc_tick_ch, hz); 6682 6683 /* ...all done! */ 6684 ifp->if_flags |= IFF_RUNNING; 6685 6686 out: 6687 /* Save last flags for the callback */ 6688 sc->sc_if_flags = ifp->if_flags; 6689 sc->sc_ec_capenable = ec->ec_capenable; 6690 if (error) 6691 log(LOG_ERR, "%s: interface not running\n", 6692 device_xname(sc->sc_dev)); 6693 return error; 6694 } 6695 6696 /* 6697 * wm_stop: [ifnet interface function] 6698 * 6699 * Stop transmission on the interface. 6700 */ 6701 static void 6702 wm_stop(struct ifnet *ifp, int disable) 6703 { 6704 struct wm_softc *sc = ifp->if_softc; 6705 6706 ASSERT_SLEEPABLE(); 6707 6708 WM_CORE_LOCK(sc); 6709 wm_stop_locked(ifp, disable ? 
true : false, true); 6710 WM_CORE_UNLOCK(sc); 6711 6712 /* 6713 * After wm_set_stopping_flags(), it is guaranteed 6714 * wm_handle_queue_work() does not call workqueue_enqueue(). 6715 * However, workqueue_wait() cannot call in wm_stop_locked() 6716 * because it can sleep... 6717 * so, call workqueue_wait() here. 6718 */ 6719 for (int i = 0; i < sc->sc_nqueues; i++) 6720 workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie); 6721 } 6722 6723 static void 6724 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait) 6725 { 6726 struct wm_softc *sc = ifp->if_softc; 6727 struct wm_txsoft *txs; 6728 int i, qidx; 6729 6730 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 6731 device_xname(sc->sc_dev), __func__)); 6732 KASSERT(WM_CORE_LOCKED(sc)); 6733 6734 wm_set_stopping_flags(sc); 6735 6736 if (sc->sc_flags & WM_F_HAS_MII) { 6737 /* Down the MII. */ 6738 mii_down(&sc->sc_mii); 6739 } else { 6740 #if 0 6741 /* Should we clear PHY's status properly? */ 6742 wm_reset(sc); 6743 #endif 6744 } 6745 6746 /* Stop the transmit and receive processes. */ 6747 CSR_WRITE(sc, WMREG_TCTL, 0); 6748 CSR_WRITE(sc, WMREG_RCTL, 0); 6749 sc->sc_rctl &= ~RCTL_EN; 6750 6751 /* 6752 * Clear the interrupt mask to ensure the device cannot assert its 6753 * interrupt line. 6754 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to 6755 * service any currently pending or shared interrupt. 6756 */ 6757 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 6758 sc->sc_icr = 0; 6759 if (wm_is_using_msix(sc)) { 6760 if (sc->sc_type != WM_T_82574) { 6761 CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU); 6762 CSR_WRITE(sc, WMREG_EIAC, 0); 6763 } else 6764 CSR_WRITE(sc, WMREG_EIAC_82574, 0); 6765 } 6766 6767 /* 6768 * Stop callouts after interrupts are disabled; if we have 6769 * to wait for them, we will be releasing the CORE_LOCK 6770 * briefly, which will unblock interrupts on the current CPU. 6771 */ 6772 6773 /* Stop the one second clock. */ 6774 if (wait) 6775 callout_halt(&sc->sc_tick_ch, sc->sc_core_lock); 6776 else 6777 callout_stop(&sc->sc_tick_ch); 6778 6779 /* Stop the 82547 Tx FIFO stall check timer. */ 6780 if (sc->sc_type == WM_T_82547) { 6781 if (wait) 6782 callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock); 6783 else 6784 callout_stop(&sc->sc_txfifo_ch); 6785 } 6786 6787 /* Release any queued transmit buffers. */ 6788 for (qidx = 0; qidx < sc->sc_nqueues; qidx++) { 6789 struct wm_queue *wmq = &sc->sc_queue[qidx]; 6790 struct wm_txqueue *txq = &wmq->wmq_txq; 6791 struct mbuf *m; 6792 6793 mutex_enter(txq->txq_lock); 6794 txq->txq_sending = false; /* Ensure watchdog disabled */ 6795 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 6796 txs = &txq->txq_soft[i]; 6797 if (txs->txs_mbuf != NULL) { 6798 bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap); 6799 m_freem(txs->txs_mbuf); 6800 txs->txs_mbuf = NULL; 6801 } 6802 } 6803 /* Drain txq_interq */ 6804 while ((m = pcq_get(txq->txq_interq)) != NULL) 6805 m_freem(m); 6806 mutex_exit(txq->txq_lock); 6807 } 6808 6809 /* Mark the interface as down and cancel the watchdog timer. 
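 * (The per-queue watchdog was already disabled above by clearing
 * txq_sending.)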
*/ 6810 ifp->if_flags &= ~IFF_RUNNING; 6811 6812 if (disable) { 6813 for (i = 0; i < sc->sc_nqueues; i++) { 6814 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 6815 mutex_enter(rxq->rxq_lock); 6816 wm_rxdrain(rxq); 6817 mutex_exit(rxq->rxq_lock); 6818 } 6819 } 6820 6821 #if 0 /* notyet */ 6822 if (sc->sc_type >= WM_T_82544) 6823 CSR_WRITE(sc, WMREG_WUC, 0); 6824 #endif 6825 } 6826 6827 static void 6828 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0) 6829 { 6830 struct mbuf *m; 6831 int i; 6832 6833 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev)); 6834 for (m = m0, i = 0; m != NULL; m = m->m_next, i++) 6835 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, " 6836 "m_flags = 0x%08x\n", device_xname(sc->sc_dev), 6837 m->m_data, m->m_len, m->m_flags); 6838 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev), 6839 i, i == 1 ? "" : "s"); 6840 } 6841 6842 /* 6843 * wm_82547_txfifo_stall: 6844 * 6845 * Callout used to wait for the 82547 Tx FIFO to drain, 6846 * reset the FIFO pointers, and restart packet transmission. 6847 */ 6848 static void 6849 wm_82547_txfifo_stall(void *arg) 6850 { 6851 struct wm_softc *sc = arg; 6852 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6853 6854 mutex_enter(txq->txq_lock); 6855 6856 if (txq->txq_stopping) 6857 goto out; 6858 6859 if (txq->txq_fifo_stall) { 6860 if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) && 6861 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) && 6862 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) { 6863 /* 6864 * Packets have drained. Stop transmitter, reset 6865 * FIFO pointers, restart transmitter, and kick 6866 * the packet queue. 6867 */ 6868 uint32_t tctl = CSR_READ(sc, WMREG_TCTL); 6869 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN); 6870 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr); 6871 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr); 6872 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr); 6873 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr); 6874 CSR_WRITE(sc, WMREG_TCTL, tctl); 6875 CSR_WRITE_FLUSH(sc); 6876 6877 txq->txq_fifo_head = 0; 6878 txq->txq_fifo_stall = 0; 6879 wm_start_locked(&sc->sc_ethercom.ec_if); 6880 } else { 6881 /* 6882 * Still waiting for packets to drain; try again in 6883 * another tick. 6884 */ 6885 callout_schedule(&sc->sc_txfifo_ch, 1); 6886 } 6887 } 6888 6889 out: 6890 mutex_exit(txq->txq_lock); 6891 } 6892 6893 /* 6894 * wm_82547_txfifo_bugchk: 6895 * 6896 * Check for bug condition in the 82547 Tx FIFO. We need to 6897 * prevent enqueueing a packet that would wrap around the end 6898 * if the Tx FIFO ring buffer, otherwise the chip will croak. 6899 * 6900 * We do this by checking the amount of space before the end 6901 * of the Tx FIFO buffer. If the packet will not fit, we "stall" 6902 * the Tx FIFO, wait for all remaining packets to drain, reset 6903 * the internal FIFO pointers to the beginning, and restart 6904 * transmission on the interface. 6905 */ 6906 #define WM_FIFO_HDR 0x10 6907 #define WM_82547_PAD_LEN 0x3e0 6908 static int 6909 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0) 6910 { 6911 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 6912 int space = txq->txq_fifo_size - txq->txq_fifo_head; 6913 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR); 6914 6915 /* Just return if already stalled. */ 6916 if (txq->txq_fifo_stall) 6917 return 1; 6918 6919 if (sc->sc_mii.mii_media_active & IFM_FDX) { 6920 /* Stall only occurs in half-duplex mode. 
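 * On a full-duplex link the packet can always be sent immediately.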
*/ 6921 goto send_packet; 6922 } 6923 6924 if (len >= WM_82547_PAD_LEN + space) { 6925 txq->txq_fifo_stall = 1; 6926 callout_schedule(&sc->sc_txfifo_ch, 1); 6927 return 1; 6928 } 6929 6930 send_packet: 6931 txq->txq_fifo_head += len; 6932 if (txq->txq_fifo_head >= txq->txq_fifo_size) 6933 txq->txq_fifo_head -= txq->txq_fifo_size; 6934 6935 return 0; 6936 } 6937 6938 static int 6939 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 6940 { 6941 int error; 6942 6943 /* 6944 * Allocate the control data structures, and create and load the 6945 * DMA map for it. 6946 * 6947 * NOTE: All Tx descriptors must be in the same 4G segment of 6948 * memory. So must Rx descriptors. We simplify by allocating 6949 * both sets within the same 4G segment. 6950 */ 6951 if (sc->sc_type < WM_T_82544) 6952 WM_NTXDESC(txq) = WM_NTXDESC_82542; 6953 else 6954 WM_NTXDESC(txq) = WM_NTXDESC_82544; 6955 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 6956 txq->txq_descsize = sizeof(nq_txdesc_t); 6957 else 6958 txq->txq_descsize = sizeof(wiseman_txdesc_t); 6959 6960 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 6961 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 6962 1, &txq->txq_desc_rseg, 0)) != 0) { 6963 aprint_error_dev(sc->sc_dev, 6964 "unable to allocate TX control data, error = %d\n", 6965 error); 6966 goto fail_0; 6967 } 6968 6969 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg, 6970 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq), 6971 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) { 6972 aprint_error_dev(sc->sc_dev, 6973 "unable to map TX control data, error = %d\n", error); 6974 goto fail_1; 6975 } 6976 6977 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1, 6978 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) { 6979 aprint_error_dev(sc->sc_dev, 6980 "unable to create TX control data DMA map, error = %d\n", 6981 error); 6982 goto fail_2; 6983 } 6984 6985 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap, 6986 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) { 6987 aprint_error_dev(sc->sc_dev, 6988 "unable to load TX control data DMA map, error = %d\n", 6989 error); 6990 goto fail_3; 6991 } 6992 6993 return 0; 6994 6995 fail_3: 6996 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 6997 fail_2: 6998 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 6999 WM_TXDESCS_SIZE(txq)); 7000 fail_1: 7001 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 7002 fail_0: 7003 return error; 7004 } 7005 7006 static void 7007 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq) 7008 { 7009 7010 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap); 7011 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap); 7012 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u, 7013 WM_TXDESCS_SIZE(txq)); 7014 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg); 7015 } 7016 7017 static int 7018 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 7019 { 7020 int error; 7021 size_t rxq_descs_size; 7022 7023 /* 7024 * Allocate the control data structures, and create and load the 7025 * DMA map for it. 7026 * 7027 * NOTE: All Tx descriptors must be in the same 4G segment of 7028 * memory. So must Rx descriptors. We simplify by allocating 7029 * both sets within the same 4G segment. 
7030 */ 7031 rxq->rxq_ndesc = WM_NRXDESC; 7032 if (sc->sc_type == WM_T_82574) 7033 rxq->rxq_descsize = sizeof(ext_rxdesc_t); 7034 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7035 rxq->rxq_descsize = sizeof(nq_rxdesc_t); 7036 else 7037 rxq->rxq_descsize = sizeof(wiseman_rxdesc_t); 7038 rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc; 7039 7040 if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size, 7041 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 7042 1, &rxq->rxq_desc_rseg, 0)) != 0) { 7043 aprint_error_dev(sc->sc_dev, 7044 "unable to allocate RX control data, error = %d\n", 7045 error); 7046 goto fail_0; 7047 } 7048 7049 if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg, 7050 rxq->rxq_desc_rseg, rxq_descs_size, 7051 (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) { 7052 aprint_error_dev(sc->sc_dev, 7053 "unable to map RX control data, error = %d\n", error); 7054 goto fail_1; 7055 } 7056 7057 if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1, 7058 rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) { 7059 aprint_error_dev(sc->sc_dev, 7060 "unable to create RX control data DMA map, error = %d\n", 7061 error); 7062 goto fail_2; 7063 } 7064 7065 if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap, 7066 rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) { 7067 aprint_error_dev(sc->sc_dev, 7068 "unable to load RX control data DMA map, error = %d\n", 7069 error); 7070 goto fail_3; 7071 } 7072 7073 return 0; 7074 7075 fail_3: 7076 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 7077 fail_2: 7078 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 7079 rxq_descs_size); 7080 fail_1: 7081 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 7082 fail_0: 7083 return error; 7084 } 7085 7086 static void 7087 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq) 7088 { 7089 7090 bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap); 7091 bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap); 7092 bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u, 7093 rxq->rxq_descsize * rxq->rxq_ndesc); 7094 bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg); 7095 } 7096 7097 7098 static int 7099 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 7100 { 7101 int i, error; 7102 7103 /* Create the transmit buffer DMA maps. */ 7104 WM_TXQUEUELEN(txq) = 7105 (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ? 7106 WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX; 7107 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7108 if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA, 7109 WM_NTXSEGS, WTX_MAX_LEN, 0, 0, 7110 &txq->txq_soft[i].txs_dmamap)) != 0) { 7111 aprint_error_dev(sc->sc_dev, 7112 "unable to create Tx DMA map %d, error = %d\n", 7113 i, error); 7114 goto fail; 7115 } 7116 } 7117 7118 return 0; 7119 7120 fail: 7121 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7122 if (txq->txq_soft[i].txs_dmamap != NULL) 7123 bus_dmamap_destroy(sc->sc_dmat, 7124 txq->txq_soft[i].txs_dmamap); 7125 } 7126 return error; 7127 } 7128 7129 static void 7130 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq) 7131 { 7132 int i; 7133 7134 for (i = 0; i < WM_TXQUEUELEN(txq); i++) { 7135 if (txq->txq_soft[i].txs_dmamap != NULL) 7136 bus_dmamap_destroy(sc->sc_dmat, 7137 txq->txq_soft[i].txs_dmamap); 7138 } 7139 } 7140 7141 static int 7142 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 7143 { 7144 int i, error; 7145 7146 /* Create the receive buffer DMA maps. 
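 * Each map describes a single MCLBYTES-sized cluster in one DMA segment.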
*/ 7147 for (i = 0; i < rxq->rxq_ndesc; i++) { 7148 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 7149 MCLBYTES, 0, 0, 7150 &rxq->rxq_soft[i].rxs_dmamap)) != 0) { 7151 aprint_error_dev(sc->sc_dev, 7152 "unable to create Rx DMA map %d error = %d\n", 7153 i, error); 7154 goto fail; 7155 } 7156 rxq->rxq_soft[i].rxs_mbuf = NULL; 7157 } 7158 7159 return 0; 7160 7161 fail: 7162 for (i = 0; i < rxq->rxq_ndesc; i++) { 7163 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 7164 bus_dmamap_destroy(sc->sc_dmat, 7165 rxq->rxq_soft[i].rxs_dmamap); 7166 } 7167 return error; 7168 } 7169 7170 static void 7171 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 7172 { 7173 int i; 7174 7175 for (i = 0; i < rxq->rxq_ndesc; i++) { 7176 if (rxq->rxq_soft[i].rxs_dmamap != NULL) 7177 bus_dmamap_destroy(sc->sc_dmat, 7178 rxq->rxq_soft[i].rxs_dmamap); 7179 } 7180 } 7181 7182 /* 7183 * wm_alloc_quques: 7184 * Allocate {tx,rx}descs and {tx,rx} buffers 7185 */ 7186 static int 7187 wm_alloc_txrx_queues(struct wm_softc *sc) 7188 { 7189 int i, error, tx_done, rx_done; 7190 7191 sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues, 7192 KM_SLEEP); 7193 if (sc->sc_queue == NULL) { 7194 aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n"); 7195 error = ENOMEM; 7196 goto fail_0; 7197 } 7198 7199 /* For transmission */ 7200 error = 0; 7201 tx_done = 0; 7202 for (i = 0; i < sc->sc_nqueues; i++) { 7203 #ifdef WM_EVENT_COUNTERS 7204 int j; 7205 const char *xname; 7206 #endif 7207 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 7208 txq->txq_sc = sc; 7209 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 7210 7211 error = wm_alloc_tx_descs(sc, txq); 7212 if (error) 7213 break; 7214 error = wm_alloc_tx_buffer(sc, txq); 7215 if (error) { 7216 wm_free_tx_descs(sc, txq); 7217 break; 7218 } 7219 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP); 7220 if (txq->txq_interq == NULL) { 7221 wm_free_tx_descs(sc, txq); 7222 wm_free_tx_buffer(sc, txq); 7223 error = ENOMEM; 7224 break; 7225 } 7226 7227 #ifdef WM_EVENT_COUNTERS 7228 xname = device_xname(sc->sc_dev); 7229 7230 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname); 7231 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname); 7232 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname); 7233 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname); 7234 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname); 7235 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname); 7236 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname); 7237 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname); 7238 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname); 7239 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname); 7240 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname); 7241 7242 for (j = 0; j < WM_NTXSEGS; j++) { 7243 snprintf(txq->txq_txseg_evcnt_names[j], 7244 sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j); 7245 evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC, 7246 NULL, xname, txq->txq_txseg_evcnt_names[j]); 7247 } 7248 7249 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname); 7250 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname); 7251 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname); 7252 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname); 7253 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname); 7254 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname); 7255 #endif /* WM_EVENT_COUNTERS */ 7256 7257 tx_done++; 7258 } 7259 if (error) 7260 goto fail_1; 7261 7262 /* For receive */ 7263 error = 0; 7264 rx_done = 0; 7265 for (i = 0; i < 
sc->sc_nqueues; i++) { 7266 #ifdef WM_EVENT_COUNTERS 7267 const char *xname; 7268 #endif 7269 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7270 rxq->rxq_sc = sc; 7271 rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 7272 7273 error = wm_alloc_rx_descs(sc, rxq); 7274 if (error) 7275 break; 7276 7277 error = wm_alloc_rx_buffer(sc, rxq); 7278 if (error) { 7279 wm_free_rx_descs(sc, rxq); 7280 break; 7281 } 7282 7283 #ifdef WM_EVENT_COUNTERS 7284 xname = device_xname(sc->sc_dev); 7285 7286 WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname); 7287 WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname); 7288 7289 WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname); 7290 WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname); 7291 #endif /* WM_EVENT_COUNTERS */ 7292 7293 rx_done++; 7294 } 7295 if (error) 7296 goto fail_2; 7297 7298 return 0; 7299 7300 fail_2: 7301 for (i = 0; i < rx_done; i++) { 7302 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7303 wm_free_rx_buffer(sc, rxq); 7304 wm_free_rx_descs(sc, rxq); 7305 if (rxq->rxq_lock) 7306 mutex_obj_free(rxq->rxq_lock); 7307 } 7308 fail_1: 7309 for (i = 0; i < tx_done; i++) { 7310 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 7311 pcq_destroy(txq->txq_interq); 7312 wm_free_tx_buffer(sc, txq); 7313 wm_free_tx_descs(sc, txq); 7314 if (txq->txq_lock) 7315 mutex_obj_free(txq->txq_lock); 7316 } 7317 7318 kmem_free(sc->sc_queue, 7319 sizeof(struct wm_queue) * sc->sc_nqueues); 7320 fail_0: 7321 return error; 7322 } 7323 7324 /* 7325 * wm_free_quques: 7326 * Free {tx,rx}descs and {tx,rx} buffers 7327 */ 7328 static void 7329 wm_free_txrx_queues(struct wm_softc *sc) 7330 { 7331 int i; 7332 7333 for (i = 0; i < sc->sc_nqueues; i++) { 7334 struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq; 7335 7336 #ifdef WM_EVENT_COUNTERS 7337 WM_Q_EVCNT_DETACH(rxq, intr, rxq, i); 7338 WM_Q_EVCNT_DETACH(rxq, defer, rxq, i); 7339 WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i); 7340 WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i); 7341 #endif /* WM_EVENT_COUNTERS */ 7342 7343 wm_free_rx_buffer(sc, rxq); 7344 wm_free_rx_descs(sc, rxq); 7345 if (rxq->rxq_lock) 7346 mutex_obj_free(rxq->rxq_lock); 7347 } 7348 7349 for (i = 0; i < sc->sc_nqueues; i++) { 7350 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq; 7351 struct mbuf *m; 7352 #ifdef WM_EVENT_COUNTERS 7353 int j; 7354 7355 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i); 7356 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i); 7357 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i); 7358 WM_Q_EVCNT_DETACH(txq, txdw, txq, i); 7359 WM_Q_EVCNT_DETACH(txq, txqe, txq, i); 7360 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i); 7361 WM_Q_EVCNT_DETACH(txq, tusum, txq, i); 7362 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i); 7363 WM_Q_EVCNT_DETACH(txq, tso, txq, i); 7364 WM_Q_EVCNT_DETACH(txq, tso6, txq, i); 7365 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i); 7366 7367 for (j = 0; j < WM_NTXSEGS; j++) 7368 evcnt_detach(&txq->txq_ev_txseg[j]); 7369 7370 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i); 7371 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i); 7372 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i); 7373 WM_Q_EVCNT_DETACH(txq, defrag, txq, i); 7374 WM_Q_EVCNT_DETACH(txq, underrun, txq, i); 7375 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i); 7376 #endif /* WM_EVENT_COUNTERS */ 7377 7378 /* Drain txq_interq */ 7379 while ((m = pcq_get(txq->txq_interq)) != NULL) 7380 m_freem(m); 7381 pcq_destroy(txq->txq_interq); 7382 7383 wm_free_tx_buffer(sc, txq); 7384 wm_free_tx_descs(sc, txq); 7385 if (txq->txq_lock) 7386 mutex_obj_free(txq->txq_lock); 7387 } 7388 7389 kmem_free(sc->sc_queue, sizeof(struct wm_queue) * 
sc->sc_nqueues); 7390 } 7391 7392 static void 7393 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq) 7394 { 7395 7396 KASSERT(mutex_owned(txq->txq_lock)); 7397 7398 /* Initialize the transmit descriptor ring. */ 7399 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq)); 7400 wm_cdtxsync(txq, 0, WM_NTXDESC(txq), 7401 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 7402 txq->txq_free = WM_NTXDESC(txq); 7403 txq->txq_next = 0; 7404 } 7405 7406 static void 7407 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq, 7408 struct wm_txqueue *txq) 7409 { 7410 7411 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 7412 device_xname(sc->sc_dev), __func__)); 7413 KASSERT(mutex_owned(txq->txq_lock)); 7414 7415 if (sc->sc_type < WM_T_82543) { 7416 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0)); 7417 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0)); 7418 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq)); 7419 CSR_WRITE(sc, WMREG_OLD_TDH, 0); 7420 CSR_WRITE(sc, WMREG_OLD_TDT, 0); 7421 CSR_WRITE(sc, WMREG_OLD_TIDV, 128); 7422 } else { 7423 int qid = wmq->wmq_id; 7424 7425 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0)); 7426 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0)); 7427 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq)); 7428 CSR_WRITE(sc, WMREG_TDH(qid), 0); 7429 7430 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 7431 /* 7432 * Don't write TDT before TCTL.EN is set. 7433 * See the document. 7434 */ 7435 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE 7436 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0) 7437 | TXDCTL_WTHRESH(0)); 7438 else { 7439 /* XXX should update with AIM? */ 7440 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4); 7441 if (sc->sc_type >= WM_T_82540) { 7442 /* Should be the same */ 7443 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4); 7444 } 7445 7446 CSR_WRITE(sc, WMREG_TDT(qid), 0); 7447 CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) | 7448 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 7449 } 7450 } 7451 } 7452 7453 static void 7454 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq) 7455 { 7456 int i; 7457 7458 KASSERT(mutex_owned(txq->txq_lock)); 7459 7460 /* Initialize the transmit job descriptors. */ 7461 for (i = 0; i < WM_TXQUEUELEN(txq); i++) 7462 txq->txq_soft[i].txs_mbuf = NULL; 7463 txq->txq_sfree = WM_TXQUEUELEN(txq); 7464 txq->txq_snext = 0; 7465 txq->txq_sdirty = 0; 7466 } 7467 7468 static void 7469 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq, 7470 struct wm_txqueue *txq) 7471 { 7472 7473 KASSERT(mutex_owned(txq->txq_lock)); 7474 7475 /* 7476 * Set up some register offsets that are different between 7477 * the i82542 and the i82543 and later chips. 7478 */ 7479 if (sc->sc_type < WM_T_82543) 7480 txq->txq_tdt_reg = WMREG_OLD_TDT; 7481 else 7482 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id); 7483 7484 wm_init_tx_descs(sc, txq); 7485 wm_init_tx_regs(sc, wmq, txq); 7486 wm_init_tx_buffer(sc, txq); 7487 7488 /* Clear other than WM_TXQ_LINKDOWN_DISCARD */ 7489 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD; 7490 7491 txq->txq_sending = false; 7492 } 7493 7494 static void 7495 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq, 7496 struct wm_rxqueue *rxq) 7497 { 7498 7499 KASSERT(mutex_owned(rxq->rxq_lock)); 7500 7501 /* 7502 * Initialize the receive descriptor and receive job 7503 * descriptor rings. 
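* (Pre-82543 chips use the fixed OLD_RD* register block and also have a
* second receive ring, which is simply cleared below; 82543 and later
* chips use the per-queue RDBA/RDLEN/RDH/RDT registers selected by
* wmq_id.)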
7504 */ 7505 if (sc->sc_type < WM_T_82543) { 7506 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0)); 7507 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0)); 7508 CSR_WRITE(sc, WMREG_OLD_RDLEN0, 7509 rxq->rxq_descsize * rxq->rxq_ndesc); 7510 CSR_WRITE(sc, WMREG_OLD_RDH0, 0); 7511 CSR_WRITE(sc, WMREG_OLD_RDT0, 0); 7512 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD); 7513 7514 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0); 7515 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0); 7516 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0); 7517 CSR_WRITE(sc, WMREG_OLD_RDH1, 0); 7518 CSR_WRITE(sc, WMREG_OLD_RDT1, 0); 7519 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0); 7520 } else { 7521 int qid = wmq->wmq_id; 7522 7523 CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0)); 7524 CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0)); 7525 CSR_WRITE(sc, WMREG_RDLEN(qid), 7526 rxq->rxq_descsize * rxq->rxq_ndesc); 7527 7528 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 7529 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1)) 7530 panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES); 7531 7532 /* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */ 7533 CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF 7534 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT)); 7535 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE 7536 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8) 7537 | RXDCTL_WTHRESH(1)); 7538 CSR_WRITE(sc, WMREG_RDH(qid), 0); 7539 CSR_WRITE(sc, WMREG_RDT(qid), 0); 7540 } else { 7541 CSR_WRITE(sc, WMREG_RDH(qid), 0); 7542 CSR_WRITE(sc, WMREG_RDT(qid), 0); 7543 /* XXX should update with AIM? */ 7544 CSR_WRITE(sc, WMREG_RDTR, 7545 (wmq->wmq_itr / 4) | RDTR_FPD); 7546 /* MUST be same */ 7547 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4); 7548 CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) | 7549 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1)); 7550 } 7551 } 7552 } 7553 7554 static int 7555 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq) 7556 { 7557 struct wm_rxsoft *rxs; 7558 int error, i; 7559 7560 KASSERT(mutex_owned(rxq->rxq_lock)); 7561 7562 for (i = 0; i < rxq->rxq_ndesc; i++) { 7563 rxs = &rxq->rxq_soft[i]; 7564 if (rxs->rxs_mbuf == NULL) { 7565 if ((error = wm_add_rxbuf(rxq, i)) != 0) { 7566 log(LOG_ERR, "%s: unable to allocate or map " 7567 "rx buffer %d, error = %d\n", 7568 device_xname(sc->sc_dev), i, error); 7569 /* 7570 * XXX Should attempt to run with fewer receive 7571 * XXX buffers instead of just failing. 7572 */ 7573 wm_rxdrain(rxq); 7574 return ENOMEM; 7575 } 7576 } else { 7577 /* 7578 * For 82575 and 82576, the RX descriptors must be 7579 * initialized after the setting of RCTL.EN in 7580 * wm_set_filter() 7581 */ 7582 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0) 7583 wm_init_rxdesc(rxq, i); 7584 } 7585 } 7586 rxq->rxq_ptr = 0; 7587 rxq->rxq_discard = 0; 7588 WM_RXCHAIN_RESET(rxq); 7589 7590 return 0; 7591 } 7592 7593 static int 7594 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq, 7595 struct wm_rxqueue *rxq) 7596 { 7597 7598 KASSERT(mutex_owned(rxq->rxq_lock)); 7599 7600 /* 7601 * Set up some register offsets that are different between 7602 * the i82542 and the i82543 and later chips. 
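* (wm_init_tx_queue() makes the same distinction for the Tx tail
* register.)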
7603 */ 7604 if (sc->sc_type < WM_T_82543) 7605 rxq->rxq_rdt_reg = WMREG_OLD_RDT0; 7606 else 7607 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id); 7608 7609 wm_init_rx_regs(sc, wmq, rxq); 7610 return wm_init_rx_buffer(sc, rxq); 7611 } 7612 7613 /* 7614 * wm_init_quques: 7615 * Initialize {tx,rx}descs and {tx,rx} buffers 7616 */ 7617 static int 7618 wm_init_txrx_queues(struct wm_softc *sc) 7619 { 7620 int i, error = 0; 7621 7622 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 7623 device_xname(sc->sc_dev), __func__)); 7624 7625 for (i = 0; i < sc->sc_nqueues; i++) { 7626 struct wm_queue *wmq = &sc->sc_queue[i]; 7627 struct wm_txqueue *txq = &wmq->wmq_txq; 7628 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 7629 7630 /* 7631 * TODO 7632 * Currently, use constant variable instead of AIM. 7633 * Furthermore, the interrupt interval of multiqueue which use 7634 * polling mode is less than default value. 7635 * More tuning and AIM are required. 7636 */ 7637 if (wm_is_using_multiqueue(sc)) 7638 wmq->wmq_itr = 50; 7639 else 7640 wmq->wmq_itr = sc->sc_itr_init; 7641 wmq->wmq_set_itr = true; 7642 7643 mutex_enter(txq->txq_lock); 7644 wm_init_tx_queue(sc, wmq, txq); 7645 mutex_exit(txq->txq_lock); 7646 7647 mutex_enter(rxq->rxq_lock); 7648 error = wm_init_rx_queue(sc, wmq, rxq); 7649 mutex_exit(rxq->rxq_lock); 7650 if (error) 7651 break; 7652 } 7653 7654 return error; 7655 } 7656 7657 /* 7658 * wm_tx_offload: 7659 * 7660 * Set up TCP/IP checksumming parameters for the 7661 * specified packet. 7662 */ 7663 static void 7664 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 7665 struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp) 7666 { 7667 struct mbuf *m0 = txs->txs_mbuf; 7668 struct livengood_tcpip_ctxdesc *t; 7669 uint32_t ipcs, tucs, cmd, cmdlen, seg; 7670 uint32_t ipcse; 7671 struct ether_header *eh; 7672 int offset, iphl; 7673 uint8_t fields; 7674 7675 /* 7676 * XXX It would be nice if the mbuf pkthdr had offset 7677 * fields for the protocol headers. 7678 */ 7679 7680 eh = mtod(m0, struct ether_header *); 7681 switch (htons(eh->ether_type)) { 7682 case ETHERTYPE_IP: 7683 case ETHERTYPE_IPV6: 7684 offset = ETHER_HDR_LEN; 7685 break; 7686 7687 case ETHERTYPE_VLAN: 7688 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 7689 break; 7690 7691 default: 7692 /* Don't support this protocol or encapsulation. */ 7693 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0; 7694 txq->txq_last_hw_ipcs = 0; 7695 txq->txq_last_hw_tucs = 0; 7696 *fieldsp = 0; 7697 *cmdp = 0; 7698 return; 7699 } 7700 7701 if ((m0->m_pkthdr.csum_flags & 7702 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 7703 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 7704 } else 7705 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data); 7706 7707 ipcse = offset + iphl - 1; 7708 7709 cmd = WTX_CMD_DEXT | WTX_DTYP_D; 7710 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE; 7711 seg = 0; 7712 fields = 0; 7713 7714 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 7715 int hlen = offset + iphl; 7716 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 7717 7718 if (__predict_false(m0->m_len < 7719 (hlen + sizeof(struct tcphdr)))) { 7720 /* 7721 * TCP/IP headers are not in the first mbuf; we need 7722 * to do this the slow and painful way. Let's just 7723 * hope this doesn't happen very often. 
7724 */ 7725 struct tcphdr th; 7726 7727 WM_Q_EVCNT_INCR(txq, tsopain); 7728 7729 m_copydata(m0, hlen, sizeof(th), &th); 7730 if (v4) { 7731 struct ip ip; 7732 7733 m_copydata(m0, offset, sizeof(ip), &ip); 7734 ip.ip_len = 0; 7735 m_copyback(m0, 7736 offset + offsetof(struct ip, ip_len), 7737 sizeof(ip.ip_len), &ip.ip_len); 7738 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 7739 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 7740 } else { 7741 struct ip6_hdr ip6; 7742 7743 m_copydata(m0, offset, sizeof(ip6), &ip6); 7744 ip6.ip6_plen = 0; 7745 m_copyback(m0, 7746 offset + offsetof(struct ip6_hdr, ip6_plen), 7747 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 7748 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 7749 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 7750 } 7751 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 7752 sizeof(th.th_sum), &th.th_sum); 7753 7754 hlen += th.th_off << 2; 7755 } else { 7756 /* 7757 * TCP/IP headers are in the first mbuf; we can do 7758 * this the easy way. 7759 */ 7760 struct tcphdr *th; 7761 7762 if (v4) { 7763 struct ip *ip = 7764 (void *)(mtod(m0, char *) + offset); 7765 th = (void *)(mtod(m0, char *) + hlen); 7766 7767 ip->ip_len = 0; 7768 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 7769 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 7770 } else { 7771 struct ip6_hdr *ip6 = 7772 (void *)(mtod(m0, char *) + offset); 7773 th = (void *)(mtod(m0, char *) + hlen); 7774 7775 ip6->ip6_plen = 0; 7776 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 7777 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 7778 } 7779 hlen += th->th_off << 2; 7780 } 7781 7782 if (v4) { 7783 WM_Q_EVCNT_INCR(txq, tso); 7784 cmdlen |= WTX_TCPIP_CMD_IP; 7785 } else { 7786 WM_Q_EVCNT_INCR(txq, tso6); 7787 ipcse = 0; 7788 } 7789 cmd |= WTX_TCPIP_CMD_TSE; 7790 cmdlen |= WTX_TCPIP_CMD_TSE | 7791 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen); 7792 seg = WTX_TCPIP_SEG_HDRLEN(hlen) | 7793 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz); 7794 } 7795 7796 /* 7797 * NOTE: Even if we're not using the IP or TCP/UDP checksum 7798 * offload feature, if we load the context descriptor, we 7799 * MUST provide valid values for IPCSS and TUCSS fields. 7800 */ 7801 7802 ipcs = WTX_TCPIP_IPCSS(offset) | 7803 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) | 7804 WTX_TCPIP_IPCSE(ipcse); 7805 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) { 7806 WM_Q_EVCNT_INCR(txq, ipsum); 7807 fields |= WTX_IXSM; 7808 } 7809 7810 offset += iphl; 7811 7812 if (m0->m_pkthdr.csum_flags & 7813 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) { 7814 WM_Q_EVCNT_INCR(txq, tusum); 7815 fields |= WTX_TXSM; 7816 tucs = WTX_TCPIP_TUCSS(offset) | 7817 WTX_TCPIP_TUCSO(offset + 7818 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) | 7819 WTX_TCPIP_TUCSE(0) /* Rest of packet */; 7820 } else if ((m0->m_pkthdr.csum_flags & 7821 (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) { 7822 WM_Q_EVCNT_INCR(txq, tusum6); 7823 fields |= WTX_TXSM; 7824 tucs = WTX_TCPIP_TUCSS(offset) | 7825 WTX_TCPIP_TUCSO(offset + 7826 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) | 7827 WTX_TCPIP_TUCSE(0) /* Rest of packet */; 7828 } else { 7829 /* Just initialize it to a valid TCP context. */ 7830 tucs = WTX_TCPIP_TUCSS(offset) | 7831 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) | 7832 WTX_TCPIP_TUCSE(0) /* Rest of packet */; 7833 } 7834 7835 *cmdp = cmd; 7836 *fieldsp = fields; 7837 7838 /* 7839 * We don't have to write context descriptor for every packet 7840 * except for 82574. For 82574, we must write context descriptor 7841 * for every packet when we use two descriptor queues. 
*
7843 * The 82574L can only remember the *last* context used
7844 * regardless of the queue it was used for. We cannot reuse
7845 * contexts on this hardware platform and must generate a new
7846 * context every time. 82574L hardware spec, section 7.2.6,
7847 * second note.
7848 */
7849 if (sc->sc_nqueues < 2) {
7850 /*
7851 * Setting up a new checksum offload context for every
7852 * frame takes a lot of processing time for the hardware.
7853 * This also reduces performance a lot for small-sized
7854 * frames, so avoid it if the driver can reuse the previously
7855 * configured checksum offload context.
7856 * For TSO, in theory we could reuse the same TSO context only if
7857 * the frame has the same type (IP/TCP) and the same MSS. However,
7858 * checking whether a frame has the same IP/TCP structure is
7859 * hard, so just ignore that and always establish a
7860 * new TSO context.
7861 */
7862 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7863 == 0) {
7864 if (txq->txq_last_hw_cmd == cmd &&
7865 txq->txq_last_hw_fields == fields &&
7866 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7867 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7868 WM_Q_EVCNT_INCR(txq, skipcontext);
7869 return;
7870 }
7871 }
7872
7873 txq->txq_last_hw_cmd = cmd;
7874 txq->txq_last_hw_fields = fields;
7875 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7876 txq->txq_last_hw_tucs = (tucs & 0xffff);
7877 }
7878
7879 /* Fill in the context descriptor. */
7880 t = (struct livengood_tcpip_ctxdesc *)
7881 &txq->txq_descs[txq->txq_next];
7882 t->tcpip_ipcs = htole32(ipcs);
7883 t->tcpip_tucs = htole32(tucs);
7884 t->tcpip_cmdlen = htole32(cmdlen);
7885 t->tcpip_seg = htole32(seg);
7886 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7887
7888 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7889 txs->txs_ndesc++;
7890 }
7891
7892 static inline int
7893 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7894 {
7895 struct wm_softc *sc = ifp->if_softc;
7896 u_int cpuid = cpu_index(curcpu());
7897
7898 /*
7899 * Currently, a simple distribution strategy.
7900 * TODO:
7901 * distribute by flowid (RSS hash value).
7902 */
7903 return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7904 }
7905
7906 static inline bool
7907 wm_linkdown_discard(struct wm_txqueue *txq)
7908 {
7909
7910 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
7911 return true;
7912
7913 return false;
7914 }
7915
7916 /*
7917 * wm_start: [ifnet interface function]
7918 *
7919 * Start packet transmission on the interface.
7920 */
7921 static void
7922 wm_start(struct ifnet *ifp)
7923 {
7924 struct wm_softc *sc = ifp->if_softc;
7925 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7926
7927 #ifdef WM_MPSAFE
7928 KASSERT(if_is_mpsafe(ifp));
7929 #endif
7930 /*
7931 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7932 */ 7933 7934 mutex_enter(txq->txq_lock); 7935 if (!txq->txq_stopping) 7936 wm_start_locked(ifp); 7937 mutex_exit(txq->txq_lock); 7938 } 7939 7940 static void 7941 wm_start_locked(struct ifnet *ifp) 7942 { 7943 struct wm_softc *sc = ifp->if_softc; 7944 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 7945 7946 wm_send_common_locked(ifp, txq, false); 7947 } 7948 7949 static int 7950 wm_transmit(struct ifnet *ifp, struct mbuf *m) 7951 { 7952 int qid; 7953 struct wm_softc *sc = ifp->if_softc; 7954 struct wm_txqueue *txq; 7955 7956 qid = wm_select_txqueue(ifp, m); 7957 txq = &sc->sc_queue[qid].wmq_txq; 7958 7959 if (__predict_false(!pcq_put(txq->txq_interq, m))) { 7960 m_freem(m); 7961 WM_Q_EVCNT_INCR(txq, pcqdrop); 7962 return ENOBUFS; 7963 } 7964 7965 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 7966 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 7967 if (m->m_flags & M_MCAST) 7968 if_statinc_ref(nsr, if_omcasts); 7969 IF_STAT_PUTREF(ifp); 7970 7971 if (mutex_tryenter(txq->txq_lock)) { 7972 if (!txq->txq_stopping) 7973 wm_transmit_locked(ifp, txq); 7974 mutex_exit(txq->txq_lock); 7975 } 7976 7977 return 0; 7978 } 7979 7980 static void 7981 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq) 7982 { 7983 7984 wm_send_common_locked(ifp, txq, true); 7985 } 7986 7987 static void 7988 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq, 7989 bool is_transmit) 7990 { 7991 struct wm_softc *sc = ifp->if_softc; 7992 struct mbuf *m0; 7993 struct wm_txsoft *txs; 7994 bus_dmamap_t dmamap; 7995 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso; 7996 bus_addr_t curaddr; 7997 bus_size_t seglen, curlen; 7998 uint32_t cksumcmd; 7999 uint8_t cksumfields; 8000 bool remap = true; 8001 8002 KASSERT(mutex_owned(txq->txq_lock)); 8003 8004 if ((ifp->if_flags & IFF_RUNNING) == 0) 8005 return; 8006 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 8007 return; 8008 8009 if (__predict_false(wm_linkdown_discard(txq))) { 8010 do { 8011 if (is_transmit) 8012 m0 = pcq_get(txq->txq_interq); 8013 else 8014 IFQ_DEQUEUE(&ifp->if_snd, m0); 8015 /* 8016 * increment successed packet counter as in the case 8017 * which the packet is discarded by link down PHY. 8018 */ 8019 if (m0 != NULL) 8020 if_statinc(ifp, if_opackets); 8021 m_freem(m0); 8022 } while (m0 != NULL); 8023 return; 8024 } 8025 8026 /* Remember the previous number of free descriptors. */ 8027 ofree = txq->txq_free; 8028 8029 /* 8030 * Loop through the send queue, setting up transmit descriptors 8031 * until we drain the queue, or use up all available transmit 8032 * descriptors. 8033 */ 8034 for (;;) { 8035 m0 = NULL; 8036 8037 /* Get a work queue entry. */ 8038 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 8039 wm_txeof(txq, UINT_MAX); 8040 if (txq->txq_sfree == 0) { 8041 DPRINTF(sc, WM_DEBUG_TX, 8042 ("%s: TX: no free job descriptors\n", 8043 device_xname(sc->sc_dev))); 8044 WM_Q_EVCNT_INCR(txq, txsstall); 8045 break; 8046 } 8047 } 8048 8049 /* Grab a packet off the queue. 
*/ 8050 if (is_transmit) 8051 m0 = pcq_get(txq->txq_interq); 8052 else 8053 IFQ_DEQUEUE(&ifp->if_snd, m0); 8054 if (m0 == NULL) 8055 break; 8056 8057 DPRINTF(sc, WM_DEBUG_TX, 8058 ("%s: TX: have packet to transmit: %p\n", 8059 device_xname(sc->sc_dev), m0)); 8060 8061 txs = &txq->txq_soft[txq->txq_snext]; 8062 dmamap = txs->txs_dmamap; 8063 8064 use_tso = (m0->m_pkthdr.csum_flags & 8065 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0; 8066 8067 /* 8068 * So says the Linux driver: 8069 * The controller does a simple calculation to make sure 8070 * there is enough room in the FIFO before initiating the 8071 * DMA for each buffer. The calc is: 8072 * 4 = ceil(buffer len / MSS) 8073 * To make sure we don't overrun the FIFO, adjust the max 8074 * buffer len if the MSS drops. 8075 */ 8076 dmamap->dm_maxsegsz = 8077 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN) 8078 ? m0->m_pkthdr.segsz << 2 8079 : WTX_MAX_LEN; 8080 8081 /* 8082 * Load the DMA map. If this fails, the packet either 8083 * didn't fit in the allotted number of segments, or we 8084 * were short on resources. For the too-many-segments 8085 * case, we simply report an error and drop the packet, 8086 * since we can't sanely copy a jumbo packet to a single 8087 * buffer. 8088 */ 8089 retry: 8090 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 8091 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 8092 if (__predict_false(error)) { 8093 if (error == EFBIG) { 8094 if (remap == true) { 8095 struct mbuf *m; 8096 8097 remap = false; 8098 m = m_defrag(m0, M_NOWAIT); 8099 if (m != NULL) { 8100 WM_Q_EVCNT_INCR(txq, defrag); 8101 m0 = m; 8102 goto retry; 8103 } 8104 } 8105 WM_Q_EVCNT_INCR(txq, toomanyseg); 8106 log(LOG_ERR, "%s: Tx packet consumes too many " 8107 "DMA segments, dropping...\n", 8108 device_xname(sc->sc_dev)); 8109 wm_dump_mbuf_chain(sc, m0); 8110 m_freem(m0); 8111 continue; 8112 } 8113 /* Short on resources, just stop for now. */ 8114 DPRINTF(sc, WM_DEBUG_TX, 8115 ("%s: TX: dmamap load failed: %d\n", 8116 device_xname(sc->sc_dev), error)); 8117 break; 8118 } 8119 8120 segs_needed = dmamap->dm_nsegs; 8121 if (use_tso) { 8122 /* For sentinel descriptor; see below. */ 8123 segs_needed++; 8124 } 8125 8126 /* 8127 * Ensure we have enough descriptors free to describe 8128 * the packet. Note, we always reserve one descriptor 8129 * at the end of the ring due to the semantics of the 8130 * TDT register, plus one more in the event we need 8131 * to load offload context. 8132 */ 8133 if (segs_needed > txq->txq_free - 2) { 8134 /* 8135 * Not enough free descriptors to transmit this 8136 * packet. We haven't committed anything yet, 8137 * so just unload the DMA map, put the packet 8138 * pack on the queue, and punt. Notify the upper 8139 * layer that there are no more slots left. 8140 */ 8141 DPRINTF(sc, WM_DEBUG_TX, 8142 ("%s: TX: need %d (%d) descriptors, have %d\n", 8143 device_xname(sc->sc_dev), dmamap->dm_nsegs, 8144 segs_needed, txq->txq_free - 1)); 8145 txq->txq_flags |= WM_TXQ_NO_SPACE; 8146 bus_dmamap_unload(sc->sc_dmat, dmamap); 8147 WM_Q_EVCNT_INCR(txq, txdstall); 8148 break; 8149 } 8150 8151 /* 8152 * Check for 82547 Tx FIFO bug. We need to do this 8153 * once we know we can transmit the packet, since we 8154 * do some internal FIFO space accounting here. 
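* wm_82547_txfifo_bugchk() returning true means this packet cannot
* safely be handed to the Tx FIFO yet; treat that like a descriptor
* shortage and retry after the FIFO has drained.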
8155 */ 8156 if (sc->sc_type == WM_T_82547 && 8157 wm_82547_txfifo_bugchk(sc, m0)) { 8158 DPRINTF(sc, WM_DEBUG_TX, 8159 ("%s: TX: 82547 Tx FIFO bug detected\n", 8160 device_xname(sc->sc_dev))); 8161 txq->txq_flags |= WM_TXQ_NO_SPACE; 8162 bus_dmamap_unload(sc->sc_dmat, dmamap); 8163 WM_Q_EVCNT_INCR(txq, fifo_stall); 8164 break; 8165 } 8166 8167 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ 8168 8169 DPRINTF(sc, WM_DEBUG_TX, 8170 ("%s: TX: packet has %d (%d) DMA segments\n", 8171 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 8172 8173 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); 8174 8175 /* 8176 * Store a pointer to the packet so that we can free it 8177 * later. 8178 * 8179 * Initially, we consider the number of descriptors the 8180 * packet uses the number of DMA segments. This may be 8181 * incremented by 1 if we do checksum offload (a descriptor 8182 * is used to set the checksum context). 8183 */ 8184 txs->txs_mbuf = m0; 8185 txs->txs_firstdesc = txq->txq_next; 8186 txs->txs_ndesc = segs_needed; 8187 8188 /* Set up offload parameters for this packet. */ 8189 if (m0->m_pkthdr.csum_flags & 8190 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | 8191 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | 8192 M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 8193 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields); 8194 } else { 8195 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0; 8196 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0; 8197 cksumcmd = 0; 8198 cksumfields = 0; 8199 } 8200 8201 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS; 8202 8203 /* Sync the DMA map. */ 8204 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 8205 BUS_DMASYNC_PREWRITE); 8206 8207 /* Initialize the transmit descriptor. */ 8208 for (nexttx = txq->txq_next, seg = 0; 8209 seg < dmamap->dm_nsegs; seg++) { 8210 for (seglen = dmamap->dm_segs[seg].ds_len, 8211 curaddr = dmamap->dm_segs[seg].ds_addr; 8212 seglen != 0; 8213 curaddr += curlen, seglen -= curlen, 8214 nexttx = WM_NEXTTX(txq, nexttx)) { 8215 curlen = seglen; 8216 8217 /* 8218 * So says the Linux driver: 8219 * Work around for premature descriptor 8220 * write-backs in TSO mode. Append a 8221 * 4-byte sentinel descriptor. 8222 */ 8223 if (use_tso && seg == dmamap->dm_nsegs - 1 && 8224 curlen > 8) 8225 curlen -= 4; 8226 8227 wm_set_dma_addr( 8228 &txq->txq_descs[nexttx].wtx_addr, curaddr); 8229 txq->txq_descs[nexttx].wtx_cmdlen 8230 = htole32(cksumcmd | curlen); 8231 txq->txq_descs[nexttx].wtx_fields.wtxu_status 8232 = 0; 8233 txq->txq_descs[nexttx].wtx_fields.wtxu_options 8234 = cksumfields; 8235 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0; 8236 lasttx = nexttx; 8237 8238 DPRINTF(sc, WM_DEBUG_TX, 8239 ("%s: TX: desc %d: low %#" PRIx64 ", " 8240 "len %#04zx\n", 8241 device_xname(sc->sc_dev), nexttx, 8242 (uint64_t)curaddr, curlen)); 8243 } 8244 } 8245 8246 KASSERT(lasttx != -1); 8247 8248 /* 8249 * Set up the command byte on the last descriptor of 8250 * the packet. If we're in the interrupt delay window, 8251 * delay the interrupt. 8252 */ 8253 txq->txq_descs[lasttx].wtx_cmdlen |= 8254 htole32(WTX_CMD_EOP | WTX_CMD_RS); 8255 8256 /* 8257 * If VLANs are enabled and the packet has a VLAN tag, set 8258 * up the descriptor to encapsulate the packet for us. 8259 * 8260 * This is only valid on the last descriptor of the packet. 
8261 */ 8262 if (vlan_has_tag(m0)) { 8263 txq->txq_descs[lasttx].wtx_cmdlen |= 8264 htole32(WTX_CMD_VLE); 8265 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan 8266 = htole16(vlan_get_tag(m0)); 8267 } 8268 8269 txs->txs_lastdesc = lasttx; 8270 8271 DPRINTF(sc, WM_DEBUG_TX, 8272 ("%s: TX: desc %d: cmdlen 0x%08x\n", 8273 device_xname(sc->sc_dev), 8274 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); 8275 8276 /* Sync the descriptors we're using. */ 8277 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, 8278 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 8279 8280 /* Give the packet to the chip. */ 8281 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 8282 8283 DPRINTF(sc, WM_DEBUG_TX, 8284 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 8285 8286 DPRINTF(sc, WM_DEBUG_TX, 8287 ("%s: TX: finished transmitting packet, job %d\n", 8288 device_xname(sc->sc_dev), txq->txq_snext)); 8289 8290 /* Advance the tx pointer. */ 8291 txq->txq_free -= txs->txs_ndesc; 8292 txq->txq_next = nexttx; 8293 8294 txq->txq_sfree--; 8295 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); 8296 8297 /* Pass the packet to any BPF listeners. */ 8298 bpf_mtap(ifp, m0, BPF_D_OUT); 8299 } 8300 8301 if (m0 != NULL) { 8302 txq->txq_flags |= WM_TXQ_NO_SPACE; 8303 WM_Q_EVCNT_INCR(txq, descdrop); 8304 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", 8305 __func__)); 8306 m_freem(m0); 8307 } 8308 8309 if (txq->txq_sfree == 0 || txq->txq_free <= 2) { 8310 /* No more slots; notify upper layer. */ 8311 txq->txq_flags |= WM_TXQ_NO_SPACE; 8312 } 8313 8314 if (txq->txq_free != ofree) { 8315 /* Set a watchdog timer in case the chip flakes out. */ 8316 txq->txq_lastsent = time_uptime; 8317 txq->txq_sending = true; 8318 } 8319 } 8320 8321 /* 8322 * wm_nq_tx_offload: 8323 * 8324 * Set up TCP/IP checksumming parameters for the 8325 * specified packet, for NEWQUEUE devices 8326 */ 8327 static void 8328 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq, 8329 struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum) 8330 { 8331 struct mbuf *m0 = txs->txs_mbuf; 8332 uint32_t vl_len, mssidx, cmdc; 8333 struct ether_header *eh; 8334 int offset, iphl; 8335 8336 /* 8337 * XXX It would be nice if the mbuf pkthdr had offset 8338 * fields for the protocol headers. 8339 */ 8340 *cmdlenp = 0; 8341 *fieldsp = 0; 8342 8343 eh = mtod(m0, struct ether_header *); 8344 switch (htons(eh->ether_type)) { 8345 case ETHERTYPE_IP: 8346 case ETHERTYPE_IPV6: 8347 offset = ETHER_HDR_LEN; 8348 break; 8349 8350 case ETHERTYPE_VLAN: 8351 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 8352 break; 8353 8354 default: 8355 /* Don't support this protocol or encapsulation. 
*/ 8356 *do_csum = false; 8357 return; 8358 } 8359 *do_csum = true; 8360 *cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS; 8361 cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT; 8362 8363 vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT); 8364 KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0); 8365 8366 if ((m0->m_pkthdr.csum_flags & 8367 (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) { 8368 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 8369 } else { 8370 iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data); 8371 } 8372 vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT); 8373 KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0); 8374 8375 if (vlan_has_tag(m0)) { 8376 vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK) 8377 << NQTXC_VLLEN_VLAN_SHIFT); 8378 *cmdlenp |= NQTX_CMD_VLE; 8379 } 8380 8381 mssidx = 0; 8382 8383 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) { 8384 int hlen = offset + iphl; 8385 int tcp_hlen; 8386 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0; 8387 8388 if (__predict_false(m0->m_len < 8389 (hlen + sizeof(struct tcphdr)))) { 8390 /* 8391 * TCP/IP headers are not in the first mbuf; we need 8392 * to do this the slow and painful way. Let's just 8393 * hope this doesn't happen very often. 8394 */ 8395 struct tcphdr th; 8396 8397 WM_Q_EVCNT_INCR(txq, tsopain); 8398 8399 m_copydata(m0, hlen, sizeof(th), &th); 8400 if (v4) { 8401 struct ip ip; 8402 8403 m_copydata(m0, offset, sizeof(ip), &ip); 8404 ip.ip_len = 0; 8405 m_copyback(m0, 8406 offset + offsetof(struct ip, ip_len), 8407 sizeof(ip.ip_len), &ip.ip_len); 8408 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr, 8409 ip.ip_dst.s_addr, htons(IPPROTO_TCP)); 8410 } else { 8411 struct ip6_hdr ip6; 8412 8413 m_copydata(m0, offset, sizeof(ip6), &ip6); 8414 ip6.ip6_plen = 0; 8415 m_copyback(m0, 8416 offset + offsetof(struct ip6_hdr, ip6_plen), 8417 sizeof(ip6.ip6_plen), &ip6.ip6_plen); 8418 th.th_sum = in6_cksum_phdr(&ip6.ip6_src, 8419 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP)); 8420 } 8421 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum), 8422 sizeof(th.th_sum), &th.th_sum); 8423 8424 tcp_hlen = th.th_off << 2; 8425 } else { 8426 /* 8427 * TCP/IP headers are in the first mbuf; we can do 8428 * this the easy way. 
8429 */ 8430 struct tcphdr *th; 8431 8432 if (v4) { 8433 struct ip *ip = 8434 (void *)(mtod(m0, char *) + offset); 8435 th = (void *)(mtod(m0, char *) + hlen); 8436 8437 ip->ip_len = 0; 8438 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, 8439 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 8440 } else { 8441 struct ip6_hdr *ip6 = 8442 (void *)(mtod(m0, char *) + offset); 8443 th = (void *)(mtod(m0, char *) + hlen); 8444 8445 ip6->ip6_plen = 0; 8446 th->th_sum = in6_cksum_phdr(&ip6->ip6_src, 8447 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP)); 8448 } 8449 tcp_hlen = th->th_off << 2; 8450 } 8451 hlen += tcp_hlen; 8452 *cmdlenp |= NQTX_CMD_TSE; 8453 8454 if (v4) { 8455 WM_Q_EVCNT_INCR(txq, tso); 8456 *fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM; 8457 } else { 8458 WM_Q_EVCNT_INCR(txq, tso6); 8459 *fieldsp |= NQTXD_FIELDS_TUXSM; 8460 } 8461 *fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT); 8462 KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 8463 mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT); 8464 KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0); 8465 mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT); 8466 KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0); 8467 } else { 8468 *fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT); 8469 KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0); 8470 } 8471 8472 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 8473 *fieldsp |= NQTXD_FIELDS_IXSM; 8474 cmdc |= NQTXC_CMD_IP4; 8475 } 8476 8477 if (m0->m_pkthdr.csum_flags & 8478 (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) { 8479 WM_Q_EVCNT_INCR(txq, tusum); 8480 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) 8481 cmdc |= NQTXC_CMD_TCP; 8482 else 8483 cmdc |= NQTXC_CMD_UDP; 8484 8485 cmdc |= NQTXC_CMD_IP4; 8486 *fieldsp |= NQTXD_FIELDS_TUXSM; 8487 } 8488 if (m0->m_pkthdr.csum_flags & 8489 (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) { 8490 WM_Q_EVCNT_INCR(txq, tusum6); 8491 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) 8492 cmdc |= NQTXC_CMD_TCP; 8493 else 8494 cmdc |= NQTXC_CMD_UDP; 8495 8496 cmdc |= NQTXC_CMD_IP6; 8497 *fieldsp |= NQTXD_FIELDS_TUXSM; 8498 } 8499 8500 /* 8501 * We don't have to write context descriptor for every packet to 8502 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354, 8503 * I210 and I211. It is enough to write once per a Tx queue for these 8504 * controllers. 8505 * It would be overhead to write context descriptor for every packet, 8506 * however it does not cause problems. 8507 */ 8508 /* Fill in the context descriptor. 
*/ 8509 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len = 8510 htole32(vl_len); 8511 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0; 8512 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd = 8513 htole32(cmdc); 8514 txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx = 8515 htole32(mssidx); 8516 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE); 8517 DPRINTF(sc, WM_DEBUG_TX, 8518 ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev), 8519 txq->txq_next, 0, vl_len)); 8520 DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc)); 8521 txq->txq_next = WM_NEXTTX(txq, txq->txq_next); 8522 txs->txs_ndesc++; 8523 } 8524 8525 /* 8526 * wm_nq_start: [ifnet interface function] 8527 * 8528 * Start packet transmission on the interface for NEWQUEUE devices 8529 */ 8530 static void 8531 wm_nq_start(struct ifnet *ifp) 8532 { 8533 struct wm_softc *sc = ifp->if_softc; 8534 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 8535 8536 #ifdef WM_MPSAFE 8537 KASSERT(if_is_mpsafe(ifp)); 8538 #endif 8539 /* 8540 * if_obytes and if_omcasts are added in if_transmit()@if.c. 8541 */ 8542 8543 mutex_enter(txq->txq_lock); 8544 if (!txq->txq_stopping) 8545 wm_nq_start_locked(ifp); 8546 mutex_exit(txq->txq_lock); 8547 } 8548 8549 static void 8550 wm_nq_start_locked(struct ifnet *ifp) 8551 { 8552 struct wm_softc *sc = ifp->if_softc; 8553 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq; 8554 8555 wm_nq_send_common_locked(ifp, txq, false); 8556 } 8557 8558 static int 8559 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m) 8560 { 8561 int qid; 8562 struct wm_softc *sc = ifp->if_softc; 8563 struct wm_txqueue *txq; 8564 8565 qid = wm_select_txqueue(ifp, m); 8566 txq = &sc->sc_queue[qid].wmq_txq; 8567 8568 if (__predict_false(!pcq_put(txq->txq_interq, m))) { 8569 m_freem(m); 8570 WM_Q_EVCNT_INCR(txq, pcqdrop); 8571 return ENOBUFS; 8572 } 8573 8574 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 8575 if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len); 8576 if (m->m_flags & M_MCAST) 8577 if_statinc_ref(nsr, if_omcasts); 8578 IF_STAT_PUTREF(ifp); 8579 8580 /* 8581 * The situations which this mutex_tryenter() fails at running time 8582 * are below two patterns. 8583 * (1) contention with interrupt handler(wm_txrxintr_msix()) 8584 * (2) contention with deferred if_start softint(wm_handle_queue()) 8585 * In the case of (1), the last packet enqueued to txq->txq_interq is 8586 * dequeued by wm_deferred_start_locked(). So, it does not get stuck. 8587 * In the case of (2), the last packet enqueued to txq->txq_interq is 8588 * also dequeued by wm_deferred_start_locked(). So, it does not get 8589 * stuck, either. 
8590 */ 8591 if (mutex_tryenter(txq->txq_lock)) { 8592 if (!txq->txq_stopping) 8593 wm_nq_transmit_locked(ifp, txq); 8594 mutex_exit(txq->txq_lock); 8595 } 8596 8597 return 0; 8598 } 8599 8600 static void 8601 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq) 8602 { 8603 8604 wm_nq_send_common_locked(ifp, txq, true); 8605 } 8606 8607 static void 8608 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq, 8609 bool is_transmit) 8610 { 8611 struct wm_softc *sc = ifp->if_softc; 8612 struct mbuf *m0; 8613 struct wm_txsoft *txs; 8614 bus_dmamap_t dmamap; 8615 int error, nexttx, lasttx = -1, seg, segs_needed; 8616 bool do_csum, sent; 8617 bool remap = true; 8618 8619 KASSERT(mutex_owned(txq->txq_lock)); 8620 8621 if ((ifp->if_flags & IFF_RUNNING) == 0) 8622 return; 8623 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0) 8624 return; 8625 8626 if (__predict_false(wm_linkdown_discard(txq))) { 8627 do { 8628 if (is_transmit) 8629 m0 = pcq_get(txq->txq_interq); 8630 else 8631 IFQ_DEQUEUE(&ifp->if_snd, m0); 8632 /* 8633 * increment successed packet counter as in the case 8634 * which the packet is discarded by link down PHY. 8635 */ 8636 if (m0 != NULL) 8637 if_statinc(ifp, if_opackets); 8638 m_freem(m0); 8639 } while (m0 != NULL); 8640 return; 8641 } 8642 8643 sent = false; 8644 8645 /* 8646 * Loop through the send queue, setting up transmit descriptors 8647 * until we drain the queue, or use up all available transmit 8648 * descriptors. 8649 */ 8650 for (;;) { 8651 m0 = NULL; 8652 8653 /* Get a work queue entry. */ 8654 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) { 8655 wm_txeof(txq, UINT_MAX); 8656 if (txq->txq_sfree == 0) { 8657 DPRINTF(sc, WM_DEBUG_TX, 8658 ("%s: TX: no free job descriptors\n", 8659 device_xname(sc->sc_dev))); 8660 WM_Q_EVCNT_INCR(txq, txsstall); 8661 break; 8662 } 8663 } 8664 8665 /* Grab a packet off the queue. */ 8666 if (is_transmit) 8667 m0 = pcq_get(txq->txq_interq); 8668 else 8669 IFQ_DEQUEUE(&ifp->if_snd, m0); 8670 if (m0 == NULL) 8671 break; 8672 8673 DPRINTF(sc, WM_DEBUG_TX, 8674 ("%s: TX: have packet to transmit: %p\n", 8675 device_xname(sc->sc_dev), m0)); 8676 8677 txs = &txq->txq_soft[txq->txq_snext]; 8678 dmamap = txs->txs_dmamap; 8679 8680 /* 8681 * Load the DMA map. If this fails, the packet either 8682 * didn't fit in the allotted number of segments, or we 8683 * were short on resources. For the too-many-segments 8684 * case, we simply report an error and drop the packet, 8685 * since we can't sanely copy a jumbo packet to a single 8686 * buffer. 8687 */ 8688 retry: 8689 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 8690 BUS_DMA_WRITE | BUS_DMA_NOWAIT); 8691 if (__predict_false(error)) { 8692 if (error == EFBIG) { 8693 if (remap == true) { 8694 struct mbuf *m; 8695 8696 remap = false; 8697 m = m_defrag(m0, M_NOWAIT); 8698 if (m != NULL) { 8699 WM_Q_EVCNT_INCR(txq, defrag); 8700 m0 = m; 8701 goto retry; 8702 } 8703 } 8704 WM_Q_EVCNT_INCR(txq, toomanyseg); 8705 log(LOG_ERR, "%s: Tx packet consumes too many " 8706 "DMA segments, dropping...\n", 8707 device_xname(sc->sc_dev)); 8708 wm_dump_mbuf_chain(sc, m0); 8709 m_freem(m0); 8710 continue; 8711 } 8712 /* Short on resources, just stop for now. */ 8713 DPRINTF(sc, WM_DEBUG_TX, 8714 ("%s: TX: dmamap load failed: %d\n", 8715 device_xname(sc->sc_dev), error)); 8716 break; 8717 } 8718 8719 segs_needed = dmamap->dm_nsegs; 8720 8721 /* 8722 * Ensure we have enough descriptors free to describe 8723 * the packet. 
Note, we always reserve one descriptor 8724 * at the end of the ring due to the semantics of the 8725 * TDT register, plus one more in the event we need 8726 * to load offload context. 8727 */ 8728 if (segs_needed > txq->txq_free - 2) { 8729 /* 8730 * Not enough free descriptors to transmit this 8731 * packet. We haven't committed anything yet, 8732 * so just unload the DMA map, put the packet 8733 * pack on the queue, and punt. Notify the upper 8734 * layer that there are no more slots left. 8735 */ 8736 DPRINTF(sc, WM_DEBUG_TX, 8737 ("%s: TX: need %d (%d) descriptors, have %d\n", 8738 device_xname(sc->sc_dev), dmamap->dm_nsegs, 8739 segs_needed, txq->txq_free - 1)); 8740 txq->txq_flags |= WM_TXQ_NO_SPACE; 8741 bus_dmamap_unload(sc->sc_dmat, dmamap); 8742 WM_Q_EVCNT_INCR(txq, txdstall); 8743 break; 8744 } 8745 8746 /* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ 8747 8748 DPRINTF(sc, WM_DEBUG_TX, 8749 ("%s: TX: packet has %d (%d) DMA segments\n", 8750 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed)); 8751 8752 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]); 8753 8754 /* 8755 * Store a pointer to the packet so that we can free it 8756 * later. 8757 * 8758 * Initially, we consider the number of descriptors the 8759 * packet uses the number of DMA segments. This may be 8760 * incremented by 1 if we do checksum offload (a descriptor 8761 * is used to set the checksum context). 8762 */ 8763 txs->txs_mbuf = m0; 8764 txs->txs_firstdesc = txq->txq_next; 8765 txs->txs_ndesc = segs_needed; 8766 8767 /* Set up offload parameters for this packet. */ 8768 uint32_t cmdlen, fields, dcmdlen; 8769 if (m0->m_pkthdr.csum_flags & 8770 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | 8771 M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 | 8772 M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 8773 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields, 8774 &do_csum); 8775 } else { 8776 do_csum = false; 8777 cmdlen = 0; 8778 fields = 0; 8779 } 8780 8781 /* Sync the DMA map. */ 8782 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 8783 BUS_DMASYNC_PREWRITE); 8784 8785 /* Initialize the first transmit descriptor. 
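* Without offload a single legacy descriptor format is used; with
* offload the first descriptor uses the advanced data format so it
* can carry the cmdlen/fields computed by wm_nq_tx_offload().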
*/ 8786 nexttx = txq->txq_next; 8787 if (!do_csum) { 8788 /* Setup a legacy descriptor */ 8789 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr, 8790 dmamap->dm_segs[0].ds_addr); 8791 txq->txq_descs[nexttx].wtx_cmdlen = 8792 htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len); 8793 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0; 8794 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0; 8795 if (vlan_has_tag(m0)) { 8796 txq->txq_descs[nexttx].wtx_cmdlen |= 8797 htole32(WTX_CMD_VLE); 8798 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 8799 htole16(vlan_get_tag(m0)); 8800 } else 8801 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0; 8802 8803 dcmdlen = 0; 8804 } else { 8805 /* Setup an advanced data descriptor */ 8806 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = 8807 htole64(dmamap->dm_segs[0].ds_addr); 8808 KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0); 8809 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = 8810 htole32(dmamap->dm_segs[0].ds_len | cmdlen); 8811 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 8812 htole32(fields); 8813 DPRINTF(sc, WM_DEBUG_TX, 8814 ("%s: TX: adv data desc %d 0x%" PRIx64 "\n", 8815 device_xname(sc->sc_dev), nexttx, 8816 (uint64_t)dmamap->dm_segs[0].ds_addr)); 8817 DPRINTF(sc, WM_DEBUG_TX, 8818 ("\t 0x%08x%08x\n", fields, 8819 (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen)); 8820 dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT; 8821 } 8822 8823 lasttx = nexttx; 8824 nexttx = WM_NEXTTX(txq, nexttx); 8825 /* 8826 * Fill in the next descriptors. legacy or advanced format 8827 * is the same here 8828 */ 8829 for (seg = 1; seg < dmamap->dm_nsegs; 8830 seg++, nexttx = WM_NEXTTX(txq, nexttx)) { 8831 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr = 8832 htole64(dmamap->dm_segs[seg].ds_addr); 8833 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen = 8834 htole32(dcmdlen | dmamap->dm_segs[seg].ds_len); 8835 KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0); 8836 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0; 8837 lasttx = nexttx; 8838 8839 DPRINTF(sc, WM_DEBUG_TX, 8840 ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n", 8841 device_xname(sc->sc_dev), nexttx, 8842 (uint64_t)dmamap->dm_segs[seg].ds_addr, 8843 dmamap->dm_segs[seg].ds_len)); 8844 } 8845 8846 KASSERT(lasttx != -1); 8847 8848 /* 8849 * Set up the command byte on the last descriptor of 8850 * the packet. If we're in the interrupt delay window, 8851 * delay the interrupt. 8852 */ 8853 KASSERT((WTX_CMD_EOP | WTX_CMD_RS) == 8854 (NQTX_CMD_EOP | NQTX_CMD_RS)); 8855 txq->txq_descs[lasttx].wtx_cmdlen |= 8856 htole32(WTX_CMD_EOP | WTX_CMD_RS); 8857 8858 txs->txs_lastdesc = lasttx; 8859 8860 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n", 8861 device_xname(sc->sc_dev), 8862 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen))); 8863 8864 /* Sync the descriptors we're using. */ 8865 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc, 8866 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 8867 8868 /* Give the packet to the chip. */ 8869 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx); 8870 sent = true; 8871 8872 DPRINTF(sc, WM_DEBUG_TX, 8873 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx)); 8874 8875 DPRINTF(sc, WM_DEBUG_TX, 8876 ("%s: TX: finished transmitting packet, job %d\n", 8877 device_xname(sc->sc_dev), txq->txq_snext)); 8878 8879 /* Advance the tx pointer. */ 8880 txq->txq_free -= txs->txs_ndesc; 8881 txq->txq_next = nexttx; 8882 8883 txq->txq_sfree--; 8884 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext); 8885 8886 /* Pass the packet to any BPF listeners. 
*/ 8887 bpf_mtap(ifp, m0, BPF_D_OUT); 8888 } 8889 8890 if (m0 != NULL) { 8891 txq->txq_flags |= WM_TXQ_NO_SPACE; 8892 WM_Q_EVCNT_INCR(txq, descdrop); 8893 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", 8894 __func__)); 8895 m_freem(m0); 8896 } 8897 8898 if (txq->txq_sfree == 0 || txq->txq_free <= 2) { 8899 /* No more slots; notify upper layer. */ 8900 txq->txq_flags |= WM_TXQ_NO_SPACE; 8901 } 8902 8903 if (sent) { 8904 /* Set a watchdog timer in case the chip flakes out. */ 8905 txq->txq_lastsent = time_uptime; 8906 txq->txq_sending = true; 8907 } 8908 } 8909 8910 static void 8911 wm_deferred_start_locked(struct wm_txqueue *txq) 8912 { 8913 struct wm_softc *sc = txq->txq_sc; 8914 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 8915 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq); 8916 int qid = wmq->wmq_id; 8917 8918 KASSERT(mutex_owned(txq->txq_lock)); 8919 8920 if (txq->txq_stopping) { 8921 mutex_exit(txq->txq_lock); 8922 return; 8923 } 8924 8925 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) { 8926 /* XXX need for ALTQ or one CPU system */ 8927 if (qid == 0) 8928 wm_nq_start_locked(ifp); 8929 wm_nq_transmit_locked(ifp, txq); 8930 } else { 8931 /* XXX need for ALTQ or one CPU system */ 8932 if (qid == 0) 8933 wm_start_locked(ifp); 8934 wm_transmit_locked(ifp, txq); 8935 } 8936 } 8937 8938 /* Interrupt */ 8939 8940 /* 8941 * wm_txeof: 8942 * 8943 * Helper; handle transmit interrupts. 8944 */ 8945 static bool 8946 wm_txeof(struct wm_txqueue *txq, u_int limit) 8947 { 8948 struct wm_softc *sc = txq->txq_sc; 8949 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 8950 struct wm_txsoft *txs; 8951 int count = 0; 8952 int i; 8953 uint8_t status; 8954 bool more = false; 8955 8956 KASSERT(mutex_owned(txq->txq_lock)); 8957 8958 if (txq->txq_stopping) 8959 return false; 8960 8961 txq->txq_flags &= ~WM_TXQ_NO_SPACE; 8962 8963 /* 8964 * Go through the Tx list and free mbufs for those 8965 * frames which have been transmitted. 8966 */ 8967 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq); 8968 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) { 8969 if (limit-- == 0) { 8970 more = true; 8971 DPRINTF(sc, WM_DEBUG_TX, 8972 ("%s: TX: loop limited, job %d is not processed\n", 8973 device_xname(sc->sc_dev), i)); 8974 break; 8975 } 8976 8977 txs = &txq->txq_soft[i]; 8978 8979 DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n", 8980 device_xname(sc->sc_dev), i)); 8981 8982 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc, 8983 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 8984 8985 status = 8986 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status; 8987 if ((status & WTX_ST_DD) == 0) { 8988 wm_cdtxsync(txq, txs->txs_lastdesc, 1, 8989 BUS_DMASYNC_PREREAD); 8990 break; 8991 } 8992 8993 count++; 8994 DPRINTF(sc, WM_DEBUG_TX, 8995 ("%s: TX: job %d done: descs %d..%d\n", 8996 device_xname(sc->sc_dev), i, txs->txs_firstdesc, 8997 txs->txs_lastdesc)); 8998 8999 /* 9000 * XXX We should probably be using the statistics 9001 * XXX registers, but I don't know if they exist 9002 * XXX on chips before the i82544. 9003 */ 9004 9005 #ifdef WM_EVENT_COUNTERS 9006 if (status & WTX_ST_TU) 9007 WM_Q_EVCNT_INCR(txq, underrun); 9008 #endif /* WM_EVENT_COUNTERS */ 9009 9010 /* 9011 * 82574 and newer's document says the status field has neither 9012 * EC (Excessive Collision) bit nor LC (Late Collision) bit 9013 * (reserved). Refer "PCIe GbE Controller Open Source Software 9014 * Developer's Manual", 82574 datasheet and newer. 
9015 * 9016 * XXX I saw the LC bit was set on I218 even though the media 9017 * was full duplex, so the bit might be used for other 9018 * meaning ...(I have no document). 9019 */ 9020 9021 if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0) 9022 && ((sc->sc_type < WM_T_82574) 9023 || (sc->sc_type == WM_T_80003))) { 9024 if_statinc(ifp, if_oerrors); 9025 if (status & WTX_ST_LC) 9026 log(LOG_WARNING, "%s: late collision\n", 9027 device_xname(sc->sc_dev)); 9028 else if (status & WTX_ST_EC) { 9029 if_statadd(ifp, if_collisions, 9030 TX_COLLISION_THRESHOLD + 1); 9031 log(LOG_WARNING, "%s: excessive collisions\n", 9032 device_xname(sc->sc_dev)); 9033 } 9034 } else 9035 if_statinc(ifp, if_opackets); 9036 9037 txq->txq_packets++; 9038 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len; 9039 9040 txq->txq_free += txs->txs_ndesc; 9041 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 9042 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 9043 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 9044 m_freem(txs->txs_mbuf); 9045 txs->txs_mbuf = NULL; 9046 } 9047 9048 /* Update the dirty transmit buffer pointer. */ 9049 txq->txq_sdirty = i; 9050 DPRINTF(sc, WM_DEBUG_TX, 9051 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i)); 9052 9053 if (count != 0) 9054 rnd_add_uint32(&sc->rnd_source, count); 9055 9056 /* 9057 * If there are no more pending transmissions, cancel the watchdog 9058 * timer. 9059 */ 9060 if (txq->txq_sfree == WM_TXQUEUELEN(txq)) 9061 txq->txq_sending = false; 9062 9063 return more; 9064 } 9065 9066 static inline uint32_t 9067 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx) 9068 { 9069 struct wm_softc *sc = rxq->rxq_sc; 9070 9071 if (sc->sc_type == WM_T_82574) 9072 return EXTRXC_STATUS( 9073 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat)); 9074 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9075 return NQRXC_STATUS( 9076 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat)); 9077 else 9078 return rxq->rxq_descs[idx].wrx_status; 9079 } 9080 9081 static inline uint32_t 9082 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx) 9083 { 9084 struct wm_softc *sc = rxq->rxq_sc; 9085 9086 if (sc->sc_type == WM_T_82574) 9087 return EXTRXC_ERROR( 9088 le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat)); 9089 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9090 return NQRXC_ERROR( 9091 le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat)); 9092 else 9093 return rxq->rxq_descs[idx].wrx_errors; 9094 } 9095 9096 static inline uint16_t 9097 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx) 9098 { 9099 struct wm_softc *sc = rxq->rxq_sc; 9100 9101 if (sc->sc_type == WM_T_82574) 9102 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan; 9103 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9104 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan; 9105 else 9106 return rxq->rxq_descs[idx].wrx_special; 9107 } 9108 9109 static inline int 9110 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx) 9111 { 9112 struct wm_softc *sc = rxq->rxq_sc; 9113 9114 if (sc->sc_type == WM_T_82574) 9115 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen; 9116 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9117 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen; 9118 else 9119 return rxq->rxq_descs[idx].wrx_len; 9120 } 9121 9122 #ifdef WM_DEBUG 9123 static inline uint32_t 9124 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx) 9125 { 9126 struct wm_softc *sc = rxq->rxq_sc; 9127 9128 if (sc->sc_type == WM_T_82574) 9129 return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash; 9130 else if ((sc->sc_flags & 
WM_F_NEWQUEUE) != 0) 9131 return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash; 9132 else 9133 return 0; 9134 } 9135 9136 static inline uint8_t 9137 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx) 9138 { 9139 struct wm_softc *sc = rxq->rxq_sc; 9140 9141 if (sc->sc_type == WM_T_82574) 9142 return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq); 9143 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9144 return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc); 9145 else 9146 return 0; 9147 } 9148 #endif /* WM_DEBUG */ 9149 9150 static inline bool 9151 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status, 9152 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit) 9153 { 9154 9155 if (sc->sc_type == WM_T_82574) 9156 return (status & ext_bit) != 0; 9157 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9158 return (status & nq_bit) != 0; 9159 else 9160 return (status & legacy_bit) != 0; 9161 } 9162 9163 static inline bool 9164 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error, 9165 uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit) 9166 { 9167 9168 if (sc->sc_type == WM_T_82574) 9169 return (error & ext_bit) != 0; 9170 else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) 9171 return (error & nq_bit) != 0; 9172 else 9173 return (error & legacy_bit) != 0; 9174 } 9175 9176 static inline bool 9177 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status) 9178 { 9179 9180 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, 9181 WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP)) 9182 return true; 9183 else 9184 return false; 9185 } 9186 9187 static inline bool 9188 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors) 9189 { 9190 struct wm_softc *sc = rxq->rxq_sc; 9191 9192 /* XXX missing error bit for newqueue? */ 9193 if (wm_rxdesc_is_set_error(sc, errors, 9194 WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE, 9195 EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ 9196 | EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE, 9197 NQRXC_ERROR_RXE)) { 9198 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, 9199 EXTRXC_ERROR_SE, 0)) 9200 log(LOG_WARNING, "%s: symbol error\n", 9201 device_xname(sc->sc_dev)); 9202 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, 9203 EXTRXC_ERROR_SEQ, 0)) 9204 log(LOG_WARNING, "%s: receive sequence error\n", 9205 device_xname(sc->sc_dev)); 9206 else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, 9207 EXTRXC_ERROR_CE, 0)) 9208 log(LOG_WARNING, "%s: CRC error\n", 9209 device_xname(sc->sc_dev)); 9210 return true; 9211 } 9212 9213 return false; 9214 } 9215 9216 static inline bool 9217 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status) 9218 { 9219 struct wm_softc *sc = rxq->rxq_sc; 9220 9221 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD, 9222 NQRXC_STATUS_DD)) { 9223 /* We have processed all of the receive descriptors. 
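* The DD bit is not set yet, so sync the descriptor back for the next
* poll and tell the caller to stop.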
*/ 9224 wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD); 9225 return false; 9226 } 9227 9228 return true; 9229 } 9230 9231 static inline bool 9232 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, 9233 uint16_t vlantag, struct mbuf *m) 9234 { 9235 9236 if (wm_rxdesc_is_set_status(rxq->rxq_sc, status, 9237 WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) { 9238 vlan_set_tag(m, le16toh(vlantag)); 9239 } 9240 9241 return true; 9242 } 9243 9244 static inline void 9245 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status, 9246 uint32_t errors, struct mbuf *m) 9247 { 9248 struct wm_softc *sc = rxq->rxq_sc; 9249 9250 if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) { 9251 if (wm_rxdesc_is_set_status(sc, status, 9252 WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) { 9253 WM_Q_EVCNT_INCR(rxq, ipsum); 9254 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 9255 if (wm_rxdesc_is_set_error(sc, errors, 9256 WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE)) 9257 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 9258 } 9259 if (wm_rxdesc_is_set_status(sc, status, 9260 WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) { 9261 /* 9262 * Note: we don't know if this was TCP or UDP, 9263 * so we just set both bits, and expect the 9264 * upper layers to deal. 9265 */ 9266 WM_Q_EVCNT_INCR(rxq, tusum); 9267 m->m_pkthdr.csum_flags |= 9268 M_CSUM_TCPv4 | M_CSUM_UDPv4 | 9269 M_CSUM_TCPv6 | M_CSUM_UDPv6; 9270 if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE, 9271 EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E)) 9272 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 9273 } 9274 } 9275 } 9276 9277 /* 9278 * wm_rxeof: 9279 * 9280 * Helper; handle receive interrupts. 9281 */ 9282 static bool 9283 wm_rxeof(struct wm_rxqueue *rxq, u_int limit) 9284 { 9285 struct wm_softc *sc = rxq->rxq_sc; 9286 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 9287 struct wm_rxsoft *rxs; 9288 struct mbuf *m; 9289 int i, len; 9290 int count = 0; 9291 uint32_t status, errors; 9292 uint16_t vlantag; 9293 bool more = false; 9294 9295 KASSERT(mutex_owned(rxq->rxq_lock)); 9296 9297 for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) { 9298 if (limit-- == 0) { 9299 more = true; 9300 DPRINTF(sc, WM_DEBUG_RX, 9301 ("%s: RX: loop limited, descriptor %d is not processed\n", 9302 device_xname(sc->sc_dev), i)); 9303 break; 9304 } 9305 9306 rxs = &rxq->rxq_soft[i]; 9307 9308 DPRINTF(sc, WM_DEBUG_RX, 9309 ("%s: RX: checking descriptor %d\n", 9310 device_xname(sc->sc_dev), i)); 9311 wm_cdrxsync(rxq, i, 9312 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 9313 9314 status = wm_rxdesc_get_status(rxq, i); 9315 errors = wm_rxdesc_get_errors(rxq, i); 9316 len = le16toh(wm_rxdesc_get_pktlen(rxq, i)); 9317 vlantag = wm_rxdesc_get_vlantag(rxq, i); 9318 #ifdef WM_DEBUG 9319 uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i)); 9320 uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i); 9321 #endif 9322 9323 if (!wm_rxdesc_dd(rxq, i, status)) { 9324 break; 9325 } 9326 9327 count++; 9328 if (__predict_false(rxq->rxq_discard)) { 9329 DPRINTF(sc, WM_DEBUG_RX, 9330 ("%s: RX: discarding contents of descriptor %d\n", 9331 device_xname(sc->sc_dev), i)); 9332 wm_init_rxdesc(rxq, i); 9333 if (wm_rxdesc_is_eop(rxq, status)) { 9334 /* Reset our state. 
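* (this EOP descriptor ends the frame we were discarding, so stop
* discarding from the next descriptor on)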
*/ 9335 DPRINTF(sc, WM_DEBUG_RX, 9336 ("%s: RX: resetting rxdiscard -> 0\n", 9337 device_xname(sc->sc_dev))); 9338 rxq->rxq_discard = 0; 9339 } 9340 continue; 9341 } 9342 9343 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 9344 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 9345 9346 m = rxs->rxs_mbuf; 9347 9348 /* 9349 * Add a new receive buffer to the ring, unless of 9350 * course the length is zero. Treat the latter as a 9351 * failed mapping. 9352 */ 9353 if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) { 9354 /* 9355 * Failed, throw away what we've done so 9356 * far, and discard the rest of the packet. 9357 */ 9358 if_statinc(ifp, if_ierrors); 9359 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 9360 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 9361 wm_init_rxdesc(rxq, i); 9362 if (!wm_rxdesc_is_eop(rxq, status)) 9363 rxq->rxq_discard = 1; 9364 if (rxq->rxq_head != NULL) 9365 m_freem(rxq->rxq_head); 9366 WM_RXCHAIN_RESET(rxq); 9367 DPRINTF(sc, WM_DEBUG_RX, 9368 ("%s: RX: Rx buffer allocation failed, " 9369 "dropping packet%s\n", device_xname(sc->sc_dev), 9370 rxq->rxq_discard ? " (discard)" : "")); 9371 continue; 9372 } 9373 9374 m->m_len = len; 9375 rxq->rxq_len += len; 9376 DPRINTF(sc, WM_DEBUG_RX, 9377 ("%s: RX: buffer at %p len %d\n", 9378 device_xname(sc->sc_dev), m->m_data, len)); 9379 9380 /* If this is not the end of the packet, keep looking. */ 9381 if (!wm_rxdesc_is_eop(rxq, status)) { 9382 WM_RXCHAIN_LINK(rxq, m); 9383 DPRINTF(sc, WM_DEBUG_RX, 9384 ("%s: RX: not yet EOP, rxlen -> %d\n", 9385 device_xname(sc->sc_dev), rxq->rxq_len)); 9386 continue; 9387 } 9388 9389 /* 9390 * Okay, we have the entire packet now. The chip is 9391 * configured to include the FCS except I35[04], I21[01]. 9392 * (not all chips can be configured to strip it), so we need 9393 * to trim it. Those chips have an eratta, the RCTL_SECRC bit 9394 * in RCTL register is always set, so we don't trim it. 9395 * PCH2 and newer chip also not include FCS when jumbo 9396 * frame is used to do workaround an errata. 9397 * May need to adjust length of previous mbuf in the 9398 * chain if the current mbuf is too short. 9399 */ 9400 if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) { 9401 if (m->m_len < ETHER_CRC_LEN) { 9402 rxq->rxq_tail->m_len 9403 -= (ETHER_CRC_LEN - m->m_len); 9404 m->m_len = 0; 9405 } else 9406 m->m_len -= ETHER_CRC_LEN; 9407 len = rxq->rxq_len - ETHER_CRC_LEN; 9408 } else 9409 len = rxq->rxq_len; 9410 9411 WM_RXCHAIN_LINK(rxq, m); 9412 9413 *rxq->rxq_tailp = NULL; 9414 m = rxq->rxq_head; 9415 9416 WM_RXCHAIN_RESET(rxq); 9417 9418 DPRINTF(sc, WM_DEBUG_RX, 9419 ("%s: RX: have entire packet, len -> %d\n", 9420 device_xname(sc->sc_dev), len)); 9421 9422 /* If an error occurred, update stats and drop the packet. */ 9423 if (wm_rxdesc_has_errors(rxq, errors)) { 9424 m_freem(m); 9425 continue; 9426 } 9427 9428 /* No errors. Receive the packet. */ 9429 m_set_rcvif(m, ifp); 9430 m->m_pkthdr.len = len; 9431 /* 9432 * TODO 9433 * should be save rsshash and rsstype to this mbuf. 9434 */ 9435 DPRINTF(sc, WM_DEBUG_RX, 9436 ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n", 9437 device_xname(sc->sc_dev), rsstype, rsshash)); 9438 9439 /* 9440 * If VLANs are enabled, VLAN packets have been unwrapped 9441 * for us. Associate the tag with the packet. 9442 */ 9443 if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m)) 9444 continue; 9445 9446 /* Set up checksum info for this packet. 
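The status and error bits of this last descriptor describe the whole frame.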
*/ 9447 wm_rxdesc_ensure_checksum(rxq, status, errors, m); 9448 9449 rxq->rxq_packets++; 9450 rxq->rxq_bytes += len; 9451 /* Pass it on. */ 9452 if_percpuq_enqueue(sc->sc_ipq, m); 9453 9454 if (rxq->rxq_stopping) 9455 break; 9456 } 9457 rxq->rxq_ptr = i; 9458 9459 if (count != 0) 9460 rnd_add_uint32(&sc->rnd_source, count); 9461 9462 DPRINTF(sc, WM_DEBUG_RX, 9463 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i)); 9464 9465 return more; 9466 } 9467 9468 /* 9469 * wm_linkintr_gmii: 9470 * 9471 * Helper; handle link interrupts for GMII. 9472 */ 9473 static void 9474 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr) 9475 { 9476 device_t dev = sc->sc_dev; 9477 uint32_t status, reg; 9478 bool link; 9479 int rv; 9480 9481 KASSERT(WM_CORE_LOCKED(sc)); 9482 9483 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev), 9484 __func__)); 9485 9486 if ((icr & ICR_LSC) == 0) { 9487 if (icr & ICR_RXSEQ) 9488 DPRINTF(sc, WM_DEBUG_LINK, 9489 ("%s: LINK Receive sequence error\n", 9490 device_xname(dev))); 9491 return; 9492 } 9493 9494 /* Link status changed */ 9495 status = CSR_READ(sc, WMREG_STATUS); 9496 link = status & STATUS_LU; 9497 if (link) { 9498 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 9499 device_xname(dev), 9500 (status & STATUS_FD) ? "FDX" : "HDX")); 9501 if (wm_phy_need_linkdown_discard(sc)) { 9502 DPRINTF(sc, WM_DEBUG_LINK, 9503 ("%s: linkintr: Clear linkdown discard flag\n", 9504 device_xname(dev))); 9505 wm_clear_linkdown_discard(sc); 9506 } 9507 } else { 9508 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 9509 device_xname(dev))); 9510 if (wm_phy_need_linkdown_discard(sc)) { 9511 DPRINTF(sc, WM_DEBUG_LINK, 9512 ("%s: linkintr: Set linkdown discard flag\n", 9513 device_xname(dev))); 9514 wm_set_linkdown_discard(sc); 9515 } 9516 } 9517 if ((sc->sc_type == WM_T_ICH8) && (link == false)) 9518 wm_gig_downshift_workaround_ich8lan(sc); 9519 9520 if ((sc->sc_type == WM_T_ICH8) 9521 && (sc->sc_phytype == WMPHY_IGP_3)) { 9522 wm_kmrn_lock_loss_workaround_ich8lan(sc); 9523 } 9524 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n", 9525 device_xname(dev))); 9526 mii_pollstat(&sc->sc_mii); 9527 if (sc->sc_type == WM_T_82543) { 9528 int miistatus, active; 9529 9530 /* 9531 * With 82543, we need to force speed and 9532 * duplex on the MAC equal to what the PHY 9533 * speed and duplex configuration is. 9534 */ 9535 miistatus = sc->sc_mii.mii_media_status; 9536 9537 if (miistatus & IFM_ACTIVE) { 9538 active = sc->sc_mii.mii_media_active; 9539 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 9540 switch (IFM_SUBTYPE(active)) { 9541 case IFM_10_T: 9542 sc->sc_ctrl |= CTRL_SPEED_10; 9543 break; 9544 case IFM_100_TX: 9545 sc->sc_ctrl |= CTRL_SPEED_100; 9546 break; 9547 case IFM_1000_T: 9548 sc->sc_ctrl |= CTRL_SPEED_1000; 9549 break; 9550 default: 9551 /* 9552 * Fiber? 9553 * Shoud not enter here. 9554 */ 9555 device_printf(dev, "unknown media (%x)\n", 9556 active); 9557 break; 9558 } 9559 if (active & IFM_FDX) 9560 sc->sc_ctrl |= CTRL_FD; 9561 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 9562 } 9563 } else if (sc->sc_type == WM_T_PCH) { 9564 wm_k1_gig_workaround_hv(sc, 9565 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); 9566 } 9567 9568 /* 9569 * When connected at 10Mbps half-duplex, some parts are excessively 9570 * aggressive resulting in many collisions. To avoid this, increase 9571 * the IPG and reduce Rx latency in the PHY. 
9572 */ 9573 if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP) 9574 && link) { 9575 uint32_t tipg_reg; 9576 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED); 9577 bool fdx; 9578 uint16_t emi_addr, emi_val; 9579 9580 tipg_reg = CSR_READ(sc, WMREG_TIPG); 9581 tipg_reg &= ~TIPG_IPGT_MASK; 9582 fdx = status & STATUS_FD; 9583 9584 if (!fdx && (speed == STATUS_SPEED_10)) { 9585 tipg_reg |= 0xff; 9586 /* Reduce Rx latency in analog PHY */ 9587 emi_val = 0; 9588 } else if ((sc->sc_type >= WM_T_PCH_SPT) && 9589 fdx && speed != STATUS_SPEED_1000) { 9590 tipg_reg |= 0xc; 9591 emi_val = 1; 9592 } else { 9593 /* Roll back the default values */ 9594 tipg_reg |= 0x08; 9595 emi_val = 1; 9596 } 9597 9598 CSR_WRITE(sc, WMREG_TIPG, tipg_reg); 9599 9600 rv = sc->phy.acquire(sc); 9601 if (rv) 9602 return; 9603 9604 if (sc->sc_type == WM_T_PCH2) 9605 emi_addr = I82579_RX_CONFIG; 9606 else 9607 emi_addr = I217_RX_CONFIG; 9608 rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val); 9609 9610 if (sc->sc_type >= WM_T_PCH_LPT) { 9611 uint16_t phy_reg; 9612 9613 sc->phy.readreg_locked(dev, 2, 9614 I217_PLL_CLOCK_GATE_REG, &phy_reg); 9615 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; 9616 if (speed == STATUS_SPEED_100 9617 || speed == STATUS_SPEED_10) 9618 phy_reg |= 0x3e8; 9619 else 9620 phy_reg |= 0xfa; 9621 sc->phy.writereg_locked(dev, 2, 9622 I217_PLL_CLOCK_GATE_REG, phy_reg); 9623 9624 if (speed == STATUS_SPEED_1000) { 9625 sc->phy.readreg_locked(dev, 2, 9626 HV_PM_CTRL, &phy_reg); 9627 9628 phy_reg |= HV_PM_CTRL_K1_CLK_REQ; 9629 9630 sc->phy.writereg_locked(dev, 2, 9631 HV_PM_CTRL, phy_reg); 9632 } 9633 } 9634 sc->phy.release(sc); 9635 9636 if (rv) 9637 return; 9638 9639 if (sc->sc_type >= WM_T_PCH_SPT) { 9640 uint16_t data, ptr_gap; 9641 9642 if (speed == STATUS_SPEED_1000) { 9643 rv = sc->phy.acquire(sc); 9644 if (rv) 9645 return; 9646 9647 rv = sc->phy.readreg_locked(dev, 2, 9648 I82579_UNKNOWN1, &data); 9649 if (rv) { 9650 sc->phy.release(sc); 9651 return; 9652 } 9653 9654 ptr_gap = (data & (0x3ff << 2)) >> 2; 9655 if (ptr_gap < 0x18) { 9656 data &= ~(0x3ff << 2); 9657 data |= (0x18 << 2); 9658 rv = sc->phy.writereg_locked(dev, 9659 2, I82579_UNKNOWN1, data); 9660 } 9661 sc->phy.release(sc); 9662 if (rv) 9663 return; 9664 } else { 9665 rv = sc->phy.acquire(sc); 9666 if (rv) 9667 return; 9668 9669 rv = sc->phy.writereg_locked(dev, 2, 9670 I82579_UNKNOWN1, 0xc023); 9671 sc->phy.release(sc); 9672 if (rv) 9673 return; 9674 9675 } 9676 } 9677 } 9678 9679 /* 9680 * I217 Packet Loss issue: 9681 * ensure that FEXTNVM4 Beacon Duration is set correctly 9682 * on power up. 
9683 * Set the Beacon Duration for I217 to 8 usec 9684 */ 9685 if (sc->sc_type >= WM_T_PCH_LPT) { 9686 reg = CSR_READ(sc, WMREG_FEXTNVM4); 9687 reg &= ~FEXTNVM4_BEACON_DURATION; 9688 reg |= FEXTNVM4_BEACON_DURATION_8US; 9689 CSR_WRITE(sc, WMREG_FEXTNVM4, reg); 9690 } 9691 9692 /* Work-around I218 hang issue */ 9693 if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) || 9694 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) || 9695 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) || 9696 (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3)) 9697 wm_k1_workaround_lpt_lp(sc, link); 9698 9699 if (sc->sc_type >= WM_T_PCH_LPT) { 9700 /* 9701 * Set platform power management values for Latency 9702 * Tolerance Reporting (LTR) 9703 */ 9704 wm_platform_pm_pch_lpt(sc, 9705 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0)); 9706 } 9707 9708 /* Clear link partner's EEE ability */ 9709 sc->eee_lp_ability = 0; 9710 9711 /* FEXTNVM6 K1-off workaround */ 9712 if (sc->sc_type == WM_T_PCH_SPT) { 9713 reg = CSR_READ(sc, WMREG_FEXTNVM6); 9714 if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE) 9715 reg |= FEXTNVM6_K1_OFF_ENABLE; 9716 else 9717 reg &= ~FEXTNVM6_K1_OFF_ENABLE; 9718 CSR_WRITE(sc, WMREG_FEXTNVM6, reg); 9719 } 9720 9721 if (!link) 9722 return; 9723 9724 switch (sc->sc_type) { 9725 case WM_T_PCH2: 9726 wm_k1_workaround_lv(sc); 9727 /* FALLTHROUGH */ 9728 case WM_T_PCH: 9729 if (sc->sc_phytype == WMPHY_82578) 9730 wm_link_stall_workaround_hv(sc); 9731 break; 9732 default: 9733 break; 9734 } 9735 9736 /* Enable/Disable EEE after link up */ 9737 if (sc->sc_phytype > WMPHY_82579) 9738 wm_set_eee_pchlan(sc); 9739 } 9740 9741 /* 9742 * wm_linkintr_tbi: 9743 * 9744 * Helper; handle link interrupts for TBI mode. 9745 */ 9746 static void 9747 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr) 9748 { 9749 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 9750 uint32_t status; 9751 9752 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 9753 __func__)); 9754 9755 status = CSR_READ(sc, WMREG_STATUS); 9756 if (icr & ICR_LSC) { 9757 wm_check_for_link(sc); 9758 if (status & STATUS_LU) { 9759 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n", 9760 device_xname(sc->sc_dev), 9761 (status & STATUS_FD) ? "FDX" : "HDX")); 9762 /* 9763 * NOTE: CTRL will update TFCE and RFCE automatically, 9764 * so we should update sc->sc_ctrl 9765 */ 9766 9767 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 9768 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 9769 sc->sc_fcrtl &= ~FCRTL_XONE; 9770 if (status & STATUS_FD) 9771 sc->sc_tctl |= 9772 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 9773 else 9774 sc->sc_tctl |= 9775 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 9776 if (sc->sc_ctrl & CTRL_TFCE) 9777 sc->sc_fcrtl |= FCRTL_XONE; 9778 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 9779 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 9780 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl); 9781 sc->sc_tbi_linkup = 1; 9782 if_link_state_change(ifp, LINK_STATE_UP); 9783 } else { 9784 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 9785 device_xname(sc->sc_dev))); 9786 sc->sc_tbi_linkup = 0; 9787 if_link_state_change(ifp, LINK_STATE_DOWN); 9788 } 9789 /* Update LED */ 9790 wm_tbi_serdes_set_linkled(sc); 9791 } else if (icr & ICR_RXSEQ) 9792 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n", 9793 device_xname(sc->sc_dev))); 9794 } 9795 9796 /* 9797 * wm_linkintr_serdes: 9798 * 9799 * Helper; handle link interrupts for TBI mode. 
9800 */ 9801 static void 9802 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr) 9803 { 9804 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 9805 struct mii_data *mii = &sc->sc_mii; 9806 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 9807 uint32_t pcs_adv, pcs_lpab, reg; 9808 9809 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev), 9810 __func__)); 9811 9812 if (icr & ICR_LSC) { 9813 /* Check PCS */ 9814 reg = CSR_READ(sc, WMREG_PCS_LSTS); 9815 if ((reg & PCS_LSTS_LINKOK) != 0) { 9816 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n", 9817 device_xname(sc->sc_dev))); 9818 mii->mii_media_status |= IFM_ACTIVE; 9819 sc->sc_tbi_linkup = 1; 9820 if_link_state_change(ifp, LINK_STATE_UP); 9821 } else { 9822 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n", 9823 device_xname(sc->sc_dev))); 9824 mii->mii_media_status |= IFM_NONE; 9825 sc->sc_tbi_linkup = 0; 9826 if_link_state_change(ifp, LINK_STATE_DOWN); 9827 wm_tbi_serdes_set_linkled(sc); 9828 return; 9829 } 9830 mii->mii_media_active |= IFM_1000_SX; 9831 if ((reg & PCS_LSTS_FDX) != 0) 9832 mii->mii_media_active |= IFM_FDX; 9833 else 9834 mii->mii_media_active |= IFM_HDX; 9835 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 9836 /* Check flow */ 9837 reg = CSR_READ(sc, WMREG_PCS_LSTS); 9838 if ((reg & PCS_LSTS_AN_COMP) == 0) { 9839 DPRINTF(sc, WM_DEBUG_LINK, 9840 ("XXX LINKOK but not ACOMP\n")); 9841 return; 9842 } 9843 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV); 9844 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB); 9845 DPRINTF(sc, WM_DEBUG_LINK, 9846 ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab)); 9847 if ((pcs_adv & TXCW_SYM_PAUSE) 9848 && (pcs_lpab & TXCW_SYM_PAUSE)) { 9849 mii->mii_media_active |= IFM_FLOW 9850 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 9851 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0) 9852 && (pcs_adv & TXCW_ASYM_PAUSE) 9853 && (pcs_lpab & TXCW_SYM_PAUSE) 9854 && (pcs_lpab & TXCW_ASYM_PAUSE)) 9855 mii->mii_media_active |= IFM_FLOW 9856 | IFM_ETH_TXPAUSE; 9857 else if ((pcs_adv & TXCW_SYM_PAUSE) 9858 && (pcs_adv & TXCW_ASYM_PAUSE) 9859 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0) 9860 && (pcs_lpab & TXCW_ASYM_PAUSE)) 9861 mii->mii_media_active |= IFM_FLOW 9862 | IFM_ETH_RXPAUSE; 9863 } 9864 /* Update LED */ 9865 wm_tbi_serdes_set_linkled(sc); 9866 } else 9867 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n", 9868 device_xname(sc->sc_dev))); 9869 } 9870 9871 /* 9872 * wm_linkintr: 9873 * 9874 * Helper; handle link interrupts. 9875 */ 9876 static void 9877 wm_linkintr(struct wm_softc *sc, uint32_t icr) 9878 { 9879 9880 KASSERT(WM_CORE_LOCKED(sc)); 9881 9882 if (sc->sc_flags & WM_F_HAS_MII) 9883 wm_linkintr_gmii(sc, icr); 9884 else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES) 9885 && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))) 9886 wm_linkintr_serdes(sc, icr); 9887 else 9888 wm_linkintr_tbi(sc, icr); 9889 } 9890 9891 9892 static inline void 9893 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq) 9894 { 9895 9896 if (wmq->wmq_txrx_use_workqueue) 9897 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu()); 9898 else 9899 softint_schedule(wmq->wmq_si); 9900 } 9901 9902 static inline void 9903 wm_legacy_intr_disable(struct wm_softc *sc) 9904 { 9905 9906 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU); 9907 } 9908 9909 static inline void 9910 wm_legacy_intr_enable(struct wm_softc *sc) 9911 { 9912 9913 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr); 9914 } 9915 9916 /* 9917 * wm_intr_legacy: 9918 * 9919 * Interrupt service routine for INTx and MSI. 
9920 */ 9921 static int 9922 wm_intr_legacy(void *arg) 9923 { 9924 struct wm_softc *sc = arg; 9925 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 9926 struct wm_queue *wmq = &sc->sc_queue[0]; 9927 struct wm_txqueue *txq = &wmq->wmq_txq; 9928 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 9929 u_int txlimit = sc->sc_tx_intr_process_limit; 9930 u_int rxlimit = sc->sc_rx_intr_process_limit; 9931 uint32_t icr, rndval = 0; 9932 bool more = false; 9933 9934 icr = CSR_READ(sc, WMREG_ICR); 9935 if ((icr & sc->sc_icr) == 0) 9936 return 0; 9937 9938 DPRINTF(sc, WM_DEBUG_TX, 9939 ("%s: INTx: got intr\n",device_xname(sc->sc_dev))); 9940 if (rndval == 0) 9941 rndval = icr; 9942 9943 mutex_enter(rxq->rxq_lock); 9944 9945 if (rxq->rxq_stopping) { 9946 mutex_exit(rxq->rxq_lock); 9947 return 1; 9948 } 9949 9950 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 9951 if (icr & (ICR_RXDMT0 | ICR_RXT0)) { 9952 DPRINTF(sc, WM_DEBUG_RX, 9953 ("%s: RX: got Rx intr 0x%08x\n", 9954 device_xname(sc->sc_dev), 9955 icr & (uint32_t)(ICR_RXDMT0 | ICR_RXT0))); 9956 WM_Q_EVCNT_INCR(rxq, intr); 9957 } 9958 #endif 9959 /* 9960 * wm_rxeof() does *not* call upper layer functions directly, 9961 * as if_percpuq_enqueue() just call softint_schedule(). 9962 * So, we can call wm_rxeof() in interrupt context. 9963 */ 9964 more = wm_rxeof(rxq, rxlimit); 9965 9966 mutex_exit(rxq->rxq_lock); 9967 mutex_enter(txq->txq_lock); 9968 9969 if (txq->txq_stopping) { 9970 mutex_exit(txq->txq_lock); 9971 return 1; 9972 } 9973 9974 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS) 9975 if (icr & ICR_TXDW) { 9976 DPRINTF(sc, WM_DEBUG_TX, 9977 ("%s: TX: got TXDW interrupt\n", 9978 device_xname(sc->sc_dev))); 9979 WM_Q_EVCNT_INCR(txq, txdw); 9980 } 9981 #endif 9982 more |= wm_txeof(txq, txlimit); 9983 if (!IF_IS_EMPTY(&ifp->if_snd)) 9984 more = true; 9985 9986 mutex_exit(txq->txq_lock); 9987 WM_CORE_LOCK(sc); 9988 9989 if (sc->sc_core_stopping) { 9990 WM_CORE_UNLOCK(sc); 9991 return 1; 9992 } 9993 9994 if (icr & (ICR_LSC | ICR_RXSEQ)) { 9995 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 9996 wm_linkintr(sc, icr); 9997 } 9998 if ((icr & ICR_GPI(0)) != 0) 9999 device_printf(sc->sc_dev, "got module interrupt\n"); 10000 10001 WM_CORE_UNLOCK(sc); 10002 10003 if (icr & ICR_RXO) { 10004 #if defined(WM_DEBUG) 10005 log(LOG_WARNING, "%s: Receive overrun\n", 10006 device_xname(sc->sc_dev)); 10007 #endif /* defined(WM_DEBUG) */ 10008 } 10009 10010 rnd_add_uint32(&sc->rnd_source, rndval); 10011 10012 if (more) { 10013 /* Try to get more packets going. 
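Disable the interrupt and defer the remaining work to softint or workqueue context.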
*/ 10014 wm_legacy_intr_disable(sc); 10015 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10016 wm_sched_handle_queue(sc, wmq); 10017 } 10018 10019 return 1; 10020 } 10021 10022 static inline void 10023 wm_txrxintr_disable(struct wm_queue *wmq) 10024 { 10025 struct wm_softc *sc = wmq->wmq_txq.txq_sc; 10026 10027 if (__predict_false(!wm_is_using_msix(sc))) { 10028 wm_legacy_intr_disable(sc); 10029 return; 10030 } 10031 10032 if (sc->sc_type == WM_T_82574) 10033 CSR_WRITE(sc, WMREG_IMC, 10034 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id)); 10035 else if (sc->sc_type == WM_T_82575) 10036 CSR_WRITE(sc, WMREG_EIMC, 10037 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); 10038 else 10039 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx); 10040 } 10041 10042 static inline void 10043 wm_txrxintr_enable(struct wm_queue *wmq) 10044 { 10045 struct wm_softc *sc = wmq->wmq_txq.txq_sc; 10046 10047 wm_itrs_calculate(sc, wmq); 10048 10049 if (__predict_false(!wm_is_using_msix(sc))) { 10050 wm_legacy_intr_enable(sc); 10051 return; 10052 } 10053 10054 /* 10055 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled here. 10056 * It does not matter whether RXQ(0) or RXQ(1) enables 10057 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled 10058 * while its wm_handle_queue(wmq) is running. 10059 */ 10060 if (sc->sc_type == WM_T_82574) 10061 CSR_WRITE(sc, WMREG_IMS, 10062 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER); 10063 else if (sc->sc_type == WM_T_82575) 10064 CSR_WRITE(sc, WMREG_EIMS, 10065 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id)); 10066 else 10067 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx); 10068 } 10069 10070 static int 10071 wm_txrxintr_msix(void *arg) 10072 { 10073 struct wm_queue *wmq = arg; 10074 struct wm_txqueue *txq = &wmq->wmq_txq; 10075 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 10076 struct wm_softc *sc = txq->txq_sc; 10077 u_int txlimit = sc->sc_tx_intr_process_limit; 10078 u_int rxlimit = sc->sc_rx_intr_process_limit; 10079 bool txmore; 10080 bool rxmore; 10081 10082 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id); 10083 10084 DPRINTF(sc, WM_DEBUG_TX, 10085 ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev))); 10086 10087 wm_txrxintr_disable(wmq); 10088 10089 mutex_enter(txq->txq_lock); 10090 10091 if (txq->txq_stopping) { 10092 mutex_exit(txq->txq_lock); 10093 return 1; 10094 } 10095 10096 WM_Q_EVCNT_INCR(txq, txdw); 10097 txmore = wm_txeof(txq, txlimit); 10098 /* wm_deferred_start() is done in wm_handle_queue().
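Transmit restart therefore happens there rather than in this handler.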
*/ 10099 mutex_exit(txq->txq_lock); 10100 10101 DPRINTF(sc, WM_DEBUG_RX, 10102 ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev))); 10103 mutex_enter(rxq->rxq_lock); 10104 10105 if (rxq->rxq_stopping) { 10106 mutex_exit(rxq->rxq_lock); 10107 return 1; 10108 } 10109 10110 WM_Q_EVCNT_INCR(rxq, intr); 10111 rxmore = wm_rxeof(rxq, rxlimit); 10112 mutex_exit(rxq->rxq_lock); 10113 10114 wm_itrs_writereg(sc, wmq); 10115 10116 if (txmore || rxmore) { 10117 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10118 wm_sched_handle_queue(sc, wmq); 10119 } else 10120 wm_txrxintr_enable(wmq); 10121 10122 return 1; 10123 } 10124 10125 static void 10126 wm_handle_queue(void *arg) 10127 { 10128 struct wm_queue *wmq = arg; 10129 struct wm_txqueue *txq = &wmq->wmq_txq; 10130 struct wm_rxqueue *rxq = &wmq->wmq_rxq; 10131 struct wm_softc *sc = txq->txq_sc; 10132 u_int txlimit = sc->sc_tx_process_limit; 10133 u_int rxlimit = sc->sc_rx_process_limit; 10134 bool txmore; 10135 bool rxmore; 10136 10137 mutex_enter(txq->txq_lock); 10138 if (txq->txq_stopping) { 10139 mutex_exit(txq->txq_lock); 10140 return; 10141 } 10142 txmore = wm_txeof(txq, txlimit); 10143 wm_deferred_start_locked(txq); 10144 mutex_exit(txq->txq_lock); 10145 10146 mutex_enter(rxq->rxq_lock); 10147 if (rxq->rxq_stopping) { 10148 mutex_exit(rxq->rxq_lock); 10149 return; 10150 } 10151 WM_Q_EVCNT_INCR(rxq, defer); 10152 rxmore = wm_rxeof(rxq, rxlimit); 10153 mutex_exit(rxq->rxq_lock); 10154 10155 if (txmore || rxmore) { 10156 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue; 10157 wm_sched_handle_queue(sc, wmq); 10158 } else 10159 wm_txrxintr_enable(wmq); 10160 } 10161 10162 static void 10163 wm_handle_queue_work(struct work *wk, void *context) 10164 { 10165 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie); 10166 10167 /* 10168 * "enqueued flag" is not required here. 10169 */ 10170 wm_handle_queue(wmq); 10171 } 10172 10173 /* 10174 * wm_linkintr_msix: 10175 * 10176 * Interrupt service routine for link status change for MSI-X. 10177 */ 10178 static int 10179 wm_linkintr_msix(void *arg) 10180 { 10181 struct wm_softc *sc = arg; 10182 uint32_t reg; 10183 bool has_rxo; 10184 10185 reg = CSR_READ(sc, WMREG_ICR); 10186 WM_CORE_LOCK(sc); 10187 DPRINTF(sc, WM_DEBUG_LINK, 10188 ("%s: LINK: got link intr. ICR = %08x\n", 10189 device_xname(sc->sc_dev), reg)); 10190 10191 if (sc->sc_core_stopping) 10192 goto out; 10193 10194 if ((reg & ICR_LSC) != 0) { 10195 WM_EVCNT_INCR(&sc->sc_ev_linkintr); 10196 wm_linkintr(sc, ICR_LSC); 10197 } 10198 if ((reg & ICR_GPI(0)) != 0) 10199 device_printf(sc->sc_dev, "got module interrupt\n"); 10200 10201 /* 10202 * XXX 82574 MSI-X mode workaround 10203 * 10204 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER 10205 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor 10206 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1) 10207 * interrupts by writing WMREG_ICS to process receive packets. 10208 */ 10209 if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) { 10210 #if defined(WM_DEBUG) 10211 log(LOG_WARNING, "%s: Receive overrun\n", 10212 device_xname(sc->sc_dev)); 10213 #endif /* defined(WM_DEBUG) */ 10214 10215 has_rxo = true; 10216 /* 10217 * The RXO interrupt is very high rate when receive traffic is 10218 * high rate. We use polling mode for ICR_OTHER like Tx/Rx 10219 * interrupts. ICR_OTHER will be enabled at the end of 10220 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and 10221 * ICR_RXQ(1) interrupts. 
10222 */ 10223 CSR_WRITE(sc, WMREG_IMC, ICR_OTHER); 10224 10225 CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1)); 10226 } 10227 10228 10229 10230 out: 10231 WM_CORE_UNLOCK(sc); 10232 10233 if (sc->sc_type == WM_T_82574) { 10234 if (!has_rxo) 10235 CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); 10236 else 10237 CSR_WRITE(sc, WMREG_IMS, ICR_LSC); 10238 } else if (sc->sc_type == WM_T_82575) 10239 CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER); 10240 else 10241 CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx); 10242 10243 return 1; 10244 } 10245 10246 /* 10247 * Media related. 10248 * GMII, SGMII, TBI (and SERDES) 10249 */ 10250 10251 /* Common */ 10252 10253 /* 10254 * wm_tbi_serdes_set_linkled: 10255 * 10256 * Update the link LED on TBI and SERDES devices. 10257 */ 10258 static void 10259 wm_tbi_serdes_set_linkled(struct wm_softc *sc) 10260 { 10261 10262 if (sc->sc_tbi_linkup) 10263 sc->sc_ctrl |= CTRL_SWDPIN(0); 10264 else 10265 sc->sc_ctrl &= ~CTRL_SWDPIN(0); 10266 10267 /* 82540 or newer devices are active low */ 10268 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0; 10269 10270 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10271 } 10272 10273 /* GMII related */ 10274 10275 /* 10276 * wm_gmii_reset: 10277 * 10278 * Reset the PHY. 10279 */ 10280 static void 10281 wm_gmii_reset(struct wm_softc *sc) 10282 { 10283 uint32_t reg; 10284 int rv; 10285 10286 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 10287 device_xname(sc->sc_dev), __func__)); 10288 10289 rv = sc->phy.acquire(sc); 10290 if (rv != 0) { 10291 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n", 10292 __func__); 10293 return; 10294 } 10295 10296 switch (sc->sc_type) { 10297 case WM_T_82542_2_0: 10298 case WM_T_82542_2_1: 10299 /* null */ 10300 break; 10301 case WM_T_82543: 10302 /* 10303 * With 82543, we need to force speed and duplex on the MAC 10304 * equal to what the PHY speed and duplex configuration is. 10305 * In addition, we need to perform a hardware reset on the PHY 10306 * to take it out of reset. 10307 */ 10308 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 10309 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10310 10311 /* The PHY reset pin is active-low. */ 10312 reg = CSR_READ(sc, WMREG_CTRL_EXT); 10313 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) | 10314 CTRL_EXT_SWDPIN(4)); 10315 reg |= CTRL_EXT_SWDPIO(4); 10316 10317 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 10318 CSR_WRITE_FLUSH(sc); 10319 delay(10*1000); 10320 10321 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4)); 10322 CSR_WRITE_FLUSH(sc); 10323 delay(150); 10324 #if 0 10325 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4); 10326 #endif 10327 delay(20*1000); /* XXX extra delay to get PHY ID? 
*/ 10328 break; 10329 case WM_T_82544: /* Reset 10000us */ 10330 case WM_T_82540: 10331 case WM_T_82545: 10332 case WM_T_82545_3: 10333 case WM_T_82546: 10334 case WM_T_82546_3: 10335 case WM_T_82541: 10336 case WM_T_82541_2: 10337 case WM_T_82547: 10338 case WM_T_82547_2: 10339 case WM_T_82571: /* Reset 100us */ 10340 case WM_T_82572: 10341 case WM_T_82573: 10342 case WM_T_82574: 10343 case WM_T_82575: 10344 case WM_T_82576: 10345 case WM_T_82580: 10346 case WM_T_I350: 10347 case WM_T_I354: 10348 case WM_T_I210: 10349 case WM_T_I211: 10350 case WM_T_82583: 10351 case WM_T_80003: 10352 /* Generic reset */ 10353 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 10354 CSR_WRITE_FLUSH(sc); 10355 delay(20000); 10356 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10357 CSR_WRITE_FLUSH(sc); 10358 delay(20000); 10359 10360 if ((sc->sc_type == WM_T_82541) 10361 || (sc->sc_type == WM_T_82541_2) 10362 || (sc->sc_type == WM_T_82547) 10363 || (sc->sc_type == WM_T_82547_2)) { 10364 /* Workaround for igp are done in igp_reset() */ 10365 /* XXX add code to set LED after phy reset */ 10366 } 10367 break; 10368 case WM_T_ICH8: 10369 case WM_T_ICH9: 10370 case WM_T_ICH10: 10371 case WM_T_PCH: 10372 case WM_T_PCH2: 10373 case WM_T_PCH_LPT: 10374 case WM_T_PCH_SPT: 10375 case WM_T_PCH_CNP: 10376 /* Generic reset */ 10377 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 10378 CSR_WRITE_FLUSH(sc); 10379 delay(100); 10380 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10381 CSR_WRITE_FLUSH(sc); 10382 delay(150); 10383 break; 10384 default: 10385 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev), 10386 __func__); 10387 break; 10388 } 10389 10390 sc->phy.release(sc); 10391 10392 /* get_cfg_done */ 10393 wm_get_cfg_done(sc); 10394 10395 /* Extra setup */ 10396 switch (sc->sc_type) { 10397 case WM_T_82542_2_0: 10398 case WM_T_82542_2_1: 10399 case WM_T_82543: 10400 case WM_T_82544: 10401 case WM_T_82540: 10402 case WM_T_82545: 10403 case WM_T_82545_3: 10404 case WM_T_82546: 10405 case WM_T_82546_3: 10406 case WM_T_82541_2: 10407 case WM_T_82547_2: 10408 case WM_T_82571: 10409 case WM_T_82572: 10410 case WM_T_82573: 10411 case WM_T_82574: 10412 case WM_T_82583: 10413 case WM_T_82575: 10414 case WM_T_82576: 10415 case WM_T_82580: 10416 case WM_T_I350: 10417 case WM_T_I354: 10418 case WM_T_I210: 10419 case WM_T_I211: 10420 case WM_T_80003: 10421 /* Null */ 10422 break; 10423 case WM_T_82541: 10424 case WM_T_82547: 10425 /* XXX Configure actively LED after PHY reset */ 10426 break; 10427 case WM_T_ICH8: 10428 case WM_T_ICH9: 10429 case WM_T_ICH10: 10430 case WM_T_PCH: 10431 case WM_T_PCH2: 10432 case WM_T_PCH_LPT: 10433 case WM_T_PCH_SPT: 10434 case WM_T_PCH_CNP: 10435 wm_phy_post_reset(sc); 10436 break; 10437 default: 10438 panic("%s: unknown type\n", __func__); 10439 break; 10440 } 10441 } 10442 10443 /* 10444 * Setup sc_phytype and mii_{read|write}reg. 10445 * 10446 * To identify PHY type, correct read/write function should be selected. 10447 * To select correct read/write function, PCI ID or MAC type are required 10448 * without accessing PHY registers. 10449 * 10450 * On the first call of this function, PHY ID is not known yet. Check 10451 * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the 10452 * result might be incorrect. 10453 * 10454 * In the second call, PHY OUI and model is used to identify PHY type. 10455 * It might not be perfect because of the lack of compared entry, but it 10456 * would be better than the first call. 
10457 * 10458 * If the detected new result and previous assumption is different, 10459 * diagnous message will be printed. 10460 */ 10461 static void 10462 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui, 10463 uint16_t phy_model) 10464 { 10465 device_t dev = sc->sc_dev; 10466 struct mii_data *mii = &sc->sc_mii; 10467 uint16_t new_phytype = WMPHY_UNKNOWN; 10468 uint16_t doubt_phytype = WMPHY_UNKNOWN; 10469 mii_readreg_t new_readreg; 10470 mii_writereg_t new_writereg; 10471 bool dodiag = true; 10472 10473 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 10474 device_xname(sc->sc_dev), __func__)); 10475 10476 /* 10477 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always 10478 * incorrect. So don't print diag output when it's 2nd call. 10479 */ 10480 if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0)) 10481 dodiag = false; 10482 10483 if (mii->mii_readreg == NULL) { 10484 /* 10485 * This is the first call of this function. For ICH and PCH 10486 * variants, it's difficult to determine the PHY access method 10487 * by sc_type, so use the PCI product ID for some devices. 10488 */ 10489 10490 switch (sc->sc_pcidevid) { 10491 case PCI_PRODUCT_INTEL_PCH_M_LM: 10492 case PCI_PRODUCT_INTEL_PCH_M_LC: 10493 /* 82577 */ 10494 new_phytype = WMPHY_82577; 10495 break; 10496 case PCI_PRODUCT_INTEL_PCH_D_DM: 10497 case PCI_PRODUCT_INTEL_PCH_D_DC: 10498 /* 82578 */ 10499 new_phytype = WMPHY_82578; 10500 break; 10501 case PCI_PRODUCT_INTEL_PCH2_LV_LM: 10502 case PCI_PRODUCT_INTEL_PCH2_LV_V: 10503 /* 82579 */ 10504 new_phytype = WMPHY_82579; 10505 break; 10506 case PCI_PRODUCT_INTEL_82801H_82567V_3: 10507 case PCI_PRODUCT_INTEL_82801I_BM: 10508 case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */ 10509 case PCI_PRODUCT_INTEL_82801J_R_BM_LM: 10510 case PCI_PRODUCT_INTEL_82801J_R_BM_LF: 10511 case PCI_PRODUCT_INTEL_82801J_D_BM_LM: 10512 case PCI_PRODUCT_INTEL_82801J_D_BM_LF: 10513 case PCI_PRODUCT_INTEL_82801J_R_BM_V: 10514 /* ICH8, 9, 10 with 82567 */ 10515 new_phytype = WMPHY_BM; 10516 break; 10517 default: 10518 break; 10519 } 10520 } else { 10521 /* It's not the first call. 
Use PHY OUI and model */ 10522 switch (phy_oui) { 10523 case MII_OUI_ATTANSIC: /* atphy(4) */ 10524 switch (phy_model) { 10525 case MII_MODEL_ATTANSIC_AR8021: 10526 new_phytype = WMPHY_82578; 10527 break; 10528 default: 10529 break; 10530 } 10531 break; 10532 case MII_OUI_xxMARVELL: 10533 switch (phy_model) { 10534 case MII_MODEL_xxMARVELL_I210: 10535 new_phytype = WMPHY_I210; 10536 break; 10537 case MII_MODEL_xxMARVELL_E1011: 10538 case MII_MODEL_xxMARVELL_E1000_3: 10539 case MII_MODEL_xxMARVELL_E1000_5: 10540 case MII_MODEL_xxMARVELL_E1112: 10541 new_phytype = WMPHY_M88; 10542 break; 10543 case MII_MODEL_xxMARVELL_E1149: 10544 new_phytype = WMPHY_BM; 10545 break; 10546 case MII_MODEL_xxMARVELL_E1111: 10547 case MII_MODEL_xxMARVELL_I347: 10548 case MII_MODEL_xxMARVELL_E1512: 10549 case MII_MODEL_xxMARVELL_E1340M: 10550 case MII_MODEL_xxMARVELL_E1543: 10551 new_phytype = WMPHY_M88; 10552 break; 10553 case MII_MODEL_xxMARVELL_I82563: 10554 new_phytype = WMPHY_GG82563; 10555 break; 10556 default: 10557 break; 10558 } 10559 break; 10560 case MII_OUI_INTEL: 10561 switch (phy_model) { 10562 case MII_MODEL_INTEL_I82577: 10563 new_phytype = WMPHY_82577; 10564 break; 10565 case MII_MODEL_INTEL_I82579: 10566 new_phytype = WMPHY_82579; 10567 break; 10568 case MII_MODEL_INTEL_I217: 10569 new_phytype = WMPHY_I217; 10570 break; 10571 case MII_MODEL_INTEL_I82580: 10572 new_phytype = WMPHY_82580; 10573 break; 10574 case MII_MODEL_INTEL_I350: 10575 new_phytype = WMPHY_I350; 10576 break; 10577 default: 10578 break; 10579 } 10580 break; 10581 case MII_OUI_yyINTEL: 10582 switch (phy_model) { 10583 case MII_MODEL_yyINTEL_I82562G: 10584 case MII_MODEL_yyINTEL_I82562EM: 10585 case MII_MODEL_yyINTEL_I82562ET: 10586 new_phytype = WMPHY_IFE; 10587 break; 10588 case MII_MODEL_yyINTEL_IGP01E1000: 10589 new_phytype = WMPHY_IGP; 10590 break; 10591 case MII_MODEL_yyINTEL_I82566: 10592 new_phytype = WMPHY_IGP_3; 10593 break; 10594 default: 10595 break; 10596 } 10597 break; 10598 default: 10599 break; 10600 } 10601 10602 if (dodiag) { 10603 if (new_phytype == WMPHY_UNKNOWN) 10604 aprint_verbose_dev(dev, 10605 "%s: Unknown PHY model. OUI=%06x, " 10606 "model=%04x\n", __func__, phy_oui, 10607 phy_model); 10608 10609 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10610 && (sc->sc_phytype != new_phytype)) { 10611 aprint_error_dev(dev, "Previously assumed PHY " 10612 "type(%u) was incorrect. PHY type from PHY" 10613 "ID = %u\n", sc->sc_phytype, new_phytype); 10614 } 10615 } 10616 } 10617 10618 /* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. 
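The MAC type can force a specific access method regardless of the PHY ID found above.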
*/ 10619 if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) { 10620 /* SGMII */ 10621 new_readreg = wm_sgmii_readreg; 10622 new_writereg = wm_sgmii_writereg; 10623 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 10624 /* BM2 (phyaddr == 1) */ 10625 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10626 && (new_phytype != WMPHY_BM) 10627 && (new_phytype != WMPHY_UNKNOWN)) 10628 doubt_phytype = new_phytype; 10629 new_phytype = WMPHY_BM; 10630 new_readreg = wm_gmii_bm_readreg; 10631 new_writereg = wm_gmii_bm_writereg; 10632 } else if (sc->sc_type >= WM_T_PCH) { 10633 /* All PCH* use _hv_ */ 10634 new_readreg = wm_gmii_hv_readreg; 10635 new_writereg = wm_gmii_hv_writereg; 10636 } else if (sc->sc_type >= WM_T_ICH8) { 10637 /* non-82567 ICH8, 9 and 10 */ 10638 new_readreg = wm_gmii_i82544_readreg; 10639 new_writereg = wm_gmii_i82544_writereg; 10640 } else if (sc->sc_type >= WM_T_80003) { 10641 /* 80003 */ 10642 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10643 && (new_phytype != WMPHY_GG82563) 10644 && (new_phytype != WMPHY_UNKNOWN)) 10645 doubt_phytype = new_phytype; 10646 new_phytype = WMPHY_GG82563; 10647 new_readreg = wm_gmii_i80003_readreg; 10648 new_writereg = wm_gmii_i80003_writereg; 10649 } else if (sc->sc_type >= WM_T_I210) { 10650 /* I210 and I211 */ 10651 if ((sc->sc_phytype != WMPHY_UNKNOWN) 10652 && (new_phytype != WMPHY_I210) 10653 && (new_phytype != WMPHY_UNKNOWN)) 10654 doubt_phytype = new_phytype; 10655 new_phytype = WMPHY_I210; 10656 new_readreg = wm_gmii_gs40g_readreg; 10657 new_writereg = wm_gmii_gs40g_writereg; 10658 } else if (sc->sc_type >= WM_T_82580) { 10659 /* 82580, I350 and I354 */ 10660 new_readreg = wm_gmii_82580_readreg; 10661 new_writereg = wm_gmii_82580_writereg; 10662 } else if (sc->sc_type >= WM_T_82544) { 10663 /* 82544, 0, [56], [17], 8257[1234] and 82583 */ 10664 new_readreg = wm_gmii_i82544_readreg; 10665 new_writereg = wm_gmii_i82544_writereg; 10666 } else { 10667 new_readreg = wm_gmii_i82543_readreg; 10668 new_writereg = wm_gmii_i82543_writereg; 10669 } 10670 10671 if (new_phytype == WMPHY_BM) { 10672 /* All BM use _bm_ */ 10673 new_readreg = wm_gmii_bm_readreg; 10674 new_writereg = wm_gmii_bm_writereg; 10675 } 10676 if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) { 10677 /* All PCH* use _hv_ */ 10678 new_readreg = wm_gmii_hv_readreg; 10679 new_writereg = wm_gmii_hv_writereg; 10680 } 10681 10682 /* Diag output */ 10683 if (dodiag) { 10684 if (doubt_phytype != WMPHY_UNKNOWN) 10685 aprint_error_dev(dev, "Assumed new PHY type was " 10686 "incorrect. old = %u, new = %u\n", sc->sc_phytype, 10687 new_phytype); 10688 else if ((sc->sc_phytype != WMPHY_UNKNOWN) 10689 && (sc->sc_phytype != new_phytype)) 10690 aprint_error_dev(dev, "Previously assumed PHY type(%u)" 10691 "was incorrect. 
New PHY type = %u\n", 10692 sc->sc_phytype, new_phytype); 10693 10694 if ((mii->mii_readreg != NULL) && 10695 (new_phytype == WMPHY_UNKNOWN)) 10696 aprint_error_dev(dev, "PHY type is still unknown.\n"); 10697 10698 if ((mii->mii_readreg != NULL) && 10699 (mii->mii_readreg != new_readreg)) 10700 aprint_error_dev(dev, "Previously assumed PHY " 10701 "read/write function was incorrect.\n"); 10702 } 10703 10704 /* Update now */ 10705 sc->sc_phytype = new_phytype; 10706 mii->mii_readreg = new_readreg; 10707 mii->mii_writereg = new_writereg; 10708 if (new_readreg == wm_gmii_hv_readreg) { 10709 sc->phy.readreg_locked = wm_gmii_hv_readreg_locked; 10710 sc->phy.writereg_locked = wm_gmii_hv_writereg_locked; 10711 } else if (new_readreg == wm_sgmii_readreg) { 10712 sc->phy.readreg_locked = wm_sgmii_readreg_locked; 10713 sc->phy.writereg_locked = wm_sgmii_writereg_locked; 10714 } else if (new_readreg == wm_gmii_i82544_readreg) { 10715 sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked; 10716 sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked; 10717 } 10718 } 10719 10720 /* 10721 * wm_get_phy_id_82575: 10722 * 10723 * Return PHY ID. Return -1 if it failed. 10724 */ 10725 static int 10726 wm_get_phy_id_82575(struct wm_softc *sc) 10727 { 10728 uint32_t reg; 10729 int phyid = -1; 10730 10731 /* XXX */ 10732 if ((sc->sc_flags & WM_F_SGMII) == 0) 10733 return -1; 10734 10735 if (wm_sgmii_uses_mdio(sc)) { 10736 switch (sc->sc_type) { 10737 case WM_T_82575: 10738 case WM_T_82576: 10739 reg = CSR_READ(sc, WMREG_MDIC); 10740 phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT; 10741 break; 10742 case WM_T_82580: 10743 case WM_T_I350: 10744 case WM_T_I354: 10745 case WM_T_I210: 10746 case WM_T_I211: 10747 reg = CSR_READ(sc, WMREG_MDICNFG); 10748 phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT; 10749 break; 10750 default: 10751 return -1; 10752 } 10753 } 10754 10755 return phyid; 10756 } 10757 10758 /* 10759 * wm_gmii_mediainit: 10760 * 10761 * Initialize media for use on 1000BASE-T devices. 10762 */ 10763 static void 10764 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid) 10765 { 10766 device_t dev = sc->sc_dev; 10767 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 10768 struct mii_data *mii = &sc->sc_mii; 10769 10770 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 10771 device_xname(sc->sc_dev), __func__)); 10772 10773 /* We have GMII. */ 10774 sc->sc_flags |= WM_F_HAS_MII; 10775 10776 if (sc->sc_type == WM_T_80003) 10777 sc->sc_tipg = TIPG_1000T_80003_DFLT; 10778 else 10779 sc->sc_tipg = TIPG_1000T_DFLT; 10780 10781 /* 10782 * Let the chip set speed/duplex on its own based on 10783 * signals from the PHY. 10784 * XXXbouyer - I'm not sure this is right for the 80003, 10785 * the em driver only sets CTRL_SLU here - but it seems to work. 10786 */ 10787 sc->sc_ctrl |= CTRL_SLU; 10788 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10789 10790 /* Initialize our media structures and probe the GMII. 
*/ 10791 mii->mii_ifp = ifp; 10792 10793 mii->mii_statchg = wm_gmii_statchg; 10794 10795 /* get PHY control from SMBus to PCIe */ 10796 if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2) 10797 || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) 10798 || (sc->sc_type == WM_T_PCH_CNP)) 10799 wm_init_phy_workarounds_pchlan(sc); 10800 10801 wm_gmii_reset(sc); 10802 10803 sc->sc_ethercom.ec_mii = &sc->sc_mii; 10804 ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange, 10805 wm_gmii_mediastatus, sc->sc_core_lock); 10806 10807 /* Setup internal SGMII PHY for SFP */ 10808 wm_sgmii_sfp_preconfig(sc); 10809 10810 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576) 10811 || (sc->sc_type == WM_T_82580) 10812 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354) 10813 || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) { 10814 if ((sc->sc_flags & WM_F_SGMII) == 0) { 10815 /* Attach only one port */ 10816 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1, 10817 MII_OFFSET_ANY, MIIF_DOPAUSE); 10818 } else { 10819 int i, id; 10820 uint32_t ctrl_ext; 10821 10822 id = wm_get_phy_id_82575(sc); 10823 if (id != -1) { 10824 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 10825 id, MII_OFFSET_ANY, MIIF_DOPAUSE); 10826 } 10827 if ((id == -1) 10828 || (LIST_FIRST(&mii->mii_phys) == NULL)) { 10829 /* Power on sgmii phy if it is disabled */ 10830 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 10831 CSR_WRITE(sc, WMREG_CTRL_EXT, 10832 ctrl_ext &~ CTRL_EXT_SWDPIN(3)); 10833 CSR_WRITE_FLUSH(sc); 10834 delay(300*1000); /* XXX too long */ 10835 10836 /* 10837 * From 1 to 8. 10838 * 10839 * I2C access fails with I2C register's ERROR 10840 * bit set, so prevent error message while 10841 * scanning. 10842 */ 10843 sc->phy.no_errprint = true; 10844 for (i = 1; i < 8; i++) 10845 mii_attach(sc->sc_dev, &sc->sc_mii, 10846 0xffffffff, i, MII_OFFSET_ANY, 10847 MIIF_DOPAUSE); 10848 sc->phy.no_errprint = false; 10849 10850 /* Restore previous sfp cage power state */ 10851 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 10852 } 10853 } 10854 } else 10855 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 10856 MII_OFFSET_ANY, MIIF_DOPAUSE); 10857 10858 /* 10859 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call 10860 * wm_set_mdio_slow_mode_hv() for a workaround and retry. 10861 */ 10862 if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) 10863 || (sc->sc_type == WM_T_PCH_SPT) 10864 || (sc->sc_type == WM_T_PCH_CNP)) 10865 && (LIST_FIRST(&mii->mii_phys) == NULL)) { 10866 wm_set_mdio_slow_mode_hv(sc); 10867 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 10868 MII_OFFSET_ANY, MIIF_DOPAUSE); 10869 } 10870 10871 /* 10872 * (For ICH8 variants) 10873 * If PHY detection failed, use BM's r/w function and retry. 10874 */ 10875 if (LIST_FIRST(&mii->mii_phys) == NULL) { 10876 /* if failed, retry with *_bm_* */ 10877 aprint_verbose_dev(dev, "Assumed PHY access function " 10878 "(type = %d) might be incorrect. 
Use BM and retry.\n", 10879 sc->sc_phytype); 10880 sc->sc_phytype = WMPHY_BM; 10881 mii->mii_readreg = wm_gmii_bm_readreg; 10882 mii->mii_writereg = wm_gmii_bm_writereg; 10883 10884 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 10885 MII_OFFSET_ANY, MIIF_DOPAUSE); 10886 } 10887 10888 if (LIST_FIRST(&mii->mii_phys) == NULL) { 10889 /* Any PHY wasn't find */ 10890 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 10891 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 10892 sc->sc_phytype = WMPHY_NONE; 10893 } else { 10894 struct mii_softc *child = LIST_FIRST(&mii->mii_phys); 10895 10896 /* 10897 * PHY Found! Check PHY type again by the second call of 10898 * wm_gmii_setup_phytype. 10899 */ 10900 wm_gmii_setup_phytype(sc, child->mii_mpd_oui, 10901 child->mii_mpd_model); 10902 10903 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 10904 } 10905 } 10906 10907 /* 10908 * wm_gmii_mediachange: [ifmedia interface function] 10909 * 10910 * Set hardware to newly-selected media on a 1000BASE-T device. 10911 */ 10912 static int 10913 wm_gmii_mediachange(struct ifnet *ifp) 10914 { 10915 struct wm_softc *sc = ifp->if_softc; 10916 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 10917 uint32_t reg; 10918 int rc; 10919 10920 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 10921 device_xname(sc->sc_dev), __func__)); 10922 if ((ifp->if_flags & IFF_UP) == 0) 10923 return 0; 10924 10925 /* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */ 10926 if ((sc->sc_type == WM_T_82580) 10927 || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210) 10928 || (sc->sc_type == WM_T_I211)) { 10929 reg = CSR_READ(sc, WMREG_PHPM); 10930 reg &= ~PHPM_GO_LINK_D; 10931 CSR_WRITE(sc, WMREG_PHPM, reg); 10932 } 10933 10934 /* Disable D0 LPLU. */ 10935 wm_lplu_d0_disable(sc); 10936 10937 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD); 10938 sc->sc_ctrl |= CTRL_SLU; 10939 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 10940 || (sc->sc_type > WM_T_82543)) { 10941 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX); 10942 } else { 10943 sc->sc_ctrl &= ~CTRL_ASDE; 10944 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX; 10945 if (ife->ifm_media & IFM_FDX) 10946 sc->sc_ctrl |= CTRL_FD; 10947 switch (IFM_SUBTYPE(ife->ifm_media)) { 10948 case IFM_10_T: 10949 sc->sc_ctrl |= CTRL_SPEED_10; 10950 break; 10951 case IFM_100_TX: 10952 sc->sc_ctrl |= CTRL_SPEED_100; 10953 break; 10954 case IFM_1000_T: 10955 sc->sc_ctrl |= CTRL_SPEED_1000; 10956 break; 10957 case IFM_NONE: 10958 /* There is no specific setting for IFM_NONE */ 10959 break; 10960 default: 10961 panic("wm_gmii_mediachange: bad media 0x%x", 10962 ife->ifm_media); 10963 } 10964 } 10965 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 10966 CSR_WRITE_FLUSH(sc); 10967 10968 if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) 10969 wm_serdes_mediachange(ifp); 10970 10971 if (sc->sc_type <= WM_T_82543) 10972 wm_gmii_reset(sc); 10973 else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211) 10974 && ((sc->sc_flags & WM_F_SGMII) != 0)) { 10975 /* allow time for SFP cage time to power up phy */ 10976 delay(300 * 1000); 10977 wm_gmii_reset(sc); 10978 } 10979 10980 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO) 10981 return 0; 10982 return rc; 10983 } 10984 10985 /* 10986 * wm_gmii_mediastatus: [ifmedia interface function] 10987 * 10988 * Get the current interface media status on a 1000BASE-T device. 
10989 */ 10990 static void 10991 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 10992 { 10993 struct wm_softc *sc = ifp->if_softc; 10994 10995 ether_mediastatus(ifp, ifmr); 10996 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 10997 | sc->sc_flowflags; 10998 } 10999 11000 #define MDI_IO CTRL_SWDPIN(2) 11001 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */ 11002 #define MDI_CLK CTRL_SWDPIN(3) 11003 11004 static void 11005 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits) 11006 { 11007 uint32_t i, v; 11008 11009 v = CSR_READ(sc, WMREG_CTRL); 11010 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 11011 v |= MDI_DIR | CTRL_SWDPIO(3); 11012 11013 for (i = __BIT(nbits - 1); i != 0; i >>= 1) { 11014 if (data & i) 11015 v |= MDI_IO; 11016 else 11017 v &= ~MDI_IO; 11018 CSR_WRITE(sc, WMREG_CTRL, v); 11019 CSR_WRITE_FLUSH(sc); 11020 delay(10); 11021 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11022 CSR_WRITE_FLUSH(sc); 11023 delay(10); 11024 CSR_WRITE(sc, WMREG_CTRL, v); 11025 CSR_WRITE_FLUSH(sc); 11026 delay(10); 11027 } 11028 } 11029 11030 static uint16_t 11031 wm_i82543_mii_recvbits(struct wm_softc *sc) 11032 { 11033 uint32_t v, i; 11034 uint16_t data = 0; 11035 11036 v = CSR_READ(sc, WMREG_CTRL); 11037 v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT)); 11038 v |= CTRL_SWDPIO(3); 11039 11040 CSR_WRITE(sc, WMREG_CTRL, v); 11041 CSR_WRITE_FLUSH(sc); 11042 delay(10); 11043 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11044 CSR_WRITE_FLUSH(sc); 11045 delay(10); 11046 CSR_WRITE(sc, WMREG_CTRL, v); 11047 CSR_WRITE_FLUSH(sc); 11048 delay(10); 11049 11050 for (i = 0; i < 16; i++) { 11051 data <<= 1; 11052 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11053 CSR_WRITE_FLUSH(sc); 11054 delay(10); 11055 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO) 11056 data |= 1; 11057 CSR_WRITE(sc, WMREG_CTRL, v); 11058 CSR_WRITE_FLUSH(sc); 11059 delay(10); 11060 } 11061 11062 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK); 11063 CSR_WRITE_FLUSH(sc); 11064 delay(10); 11065 CSR_WRITE(sc, WMREG_CTRL, v); 11066 CSR_WRITE_FLUSH(sc); 11067 delay(10); 11068 11069 return data; 11070 } 11071 11072 #undef MDI_IO 11073 #undef MDI_DIR 11074 #undef MDI_CLK 11075 11076 /* 11077 * wm_gmii_i82543_readreg: [mii interface function] 11078 * 11079 * Read a PHY register on the GMII (i82543 version). 11080 */ 11081 static int 11082 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val) 11083 { 11084 struct wm_softc *sc = device_private(dev); 11085 11086 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); 11087 wm_i82543_mii_sendbits(sc, reg | (phy << 5) | 11088 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14); 11089 *val = wm_i82543_mii_recvbits(sc) & 0xffff; 11090 11091 DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n", 11092 device_xname(dev), phy, reg, *val)); 11093 11094 return 0; 11095 } 11096 11097 /* 11098 * wm_gmii_i82543_writereg: [mii interface function] 11099 * 11100 * Write a PHY register on the GMII (i82543 version). 11101 */ 11102 static int 11103 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val) 11104 { 11105 struct wm_softc *sc = device_private(dev); 11106 11107 wm_i82543_mii_sendbits(sc, 0xffffffffU, 32); 11108 wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) | 11109 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) | 11110 (MII_COMMAND_START << 30), 32); 11111 11112 return 0; 11113 } 11114 11115 /* 11116 * wm_gmii_mdic_readreg: [mii interface function] 11117 * 11118 * Read a PHY register on the GMII. 
11119 */ 11120 static int 11121 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val) 11122 { 11123 struct wm_softc *sc = device_private(dev); 11124 uint32_t mdic = 0; 11125 int i; 11126 11127 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217) 11128 && (reg > MII_ADDRMASK)) { 11129 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", 11130 __func__, sc->sc_phytype, reg); 11131 reg &= MII_ADDRMASK; 11132 } 11133 11134 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) | 11135 MDIC_REGADD(reg)); 11136 11137 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 11138 delay(50); 11139 mdic = CSR_READ(sc, WMREG_MDIC); 11140 if (mdic & MDIC_READY) 11141 break; 11142 } 11143 11144 if ((mdic & MDIC_READY) == 0) { 11145 DPRINTF(sc, WM_DEBUG_GMII, 11146 ("%s: MDIC read timed out: phy %d reg %d\n", 11147 device_xname(dev), phy, reg)); 11148 return ETIMEDOUT; 11149 } else if (mdic & MDIC_E) { 11150 /* This is normal if no PHY is present. */ 11151 DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n", 11152 device_xname(sc->sc_dev), phy, reg)); 11153 return -1; 11154 } else 11155 *val = MDIC_DATA(mdic); 11156 11157 /* 11158 * Allow some time after each MDIC transaction to avoid 11159 * reading duplicate data in the next MDIC transaction. 11160 */ 11161 if (sc->sc_type == WM_T_PCH2) 11162 delay(100); 11163 11164 return 0; 11165 } 11166 11167 /* 11168 * wm_gmii_mdic_writereg: [mii interface function] 11169 * 11170 * Write a PHY register on the GMII. 11171 */ 11172 static int 11173 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val) 11174 { 11175 struct wm_softc *sc = device_private(dev); 11176 uint32_t mdic = 0; 11177 int i; 11178 11179 if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217) 11180 && (reg > MII_ADDRMASK)) { 11181 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", 11182 __func__, sc->sc_phytype, reg); 11183 reg &= MII_ADDRMASK; 11184 } 11185 11186 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) | 11187 MDIC_REGADD(reg) | MDIC_DATA(val)); 11188 11189 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) { 11190 delay(50); 11191 mdic = CSR_READ(sc, WMREG_MDIC); 11192 if (mdic & MDIC_READY) 11193 break; 11194 } 11195 11196 if ((mdic & MDIC_READY) == 0) { 11197 DPRINTF(sc, WM_DEBUG_GMII, 11198 ("%s: MDIC write timed out: phy %d reg %d\n", 11199 device_xname(dev), phy, reg)); 11200 return ETIMEDOUT; 11201 } else if (mdic & MDIC_E) { 11202 DPRINTF(sc, WM_DEBUG_GMII, 11203 ("%s: MDIC write error: phy %d reg %d\n", 11204 device_xname(dev), phy, reg)); 11205 return -1; 11206 } 11207 11208 /* 11209 * Allow some time after each MDIC transaction to avoid 11210 * reading duplicate data in the next MDIC transaction. 11211 */ 11212 if (sc->sc_type == WM_T_PCH2) 11213 delay(100); 11214 11215 return 0; 11216 } 11217 11218 /* 11219 * wm_gmii_i82544_readreg: [mii interface function] 11220 * 11221 * Read a PHY register on the GMII. 
11222 */ 11223 static int 11224 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val) 11225 { 11226 struct wm_softc *sc = device_private(dev); 11227 int rv; 11228 11229 if (sc->phy.acquire(sc)) { 11230 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11231 return -1; 11232 } 11233 11234 rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val); 11235 11236 sc->phy.release(sc); 11237 11238 return rv; 11239 } 11240 11241 static int 11242 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val) 11243 { 11244 struct wm_softc *sc = device_private(dev); 11245 int rv; 11246 11247 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 11248 switch (sc->sc_phytype) { 11249 case WMPHY_IGP: 11250 case WMPHY_IGP_2: 11251 case WMPHY_IGP_3: 11252 rv = wm_gmii_mdic_writereg(dev, phy, 11253 IGPHY_PAGE_SELECT, reg); 11254 if (rv != 0) 11255 return rv; 11256 break; 11257 default: 11258 #ifdef WM_DEBUG 11259 device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n", 11260 __func__, sc->sc_phytype, reg); 11261 #endif 11262 break; 11263 } 11264 } 11265 11266 return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val); 11267 } 11268 11269 /* 11270 * wm_gmii_i82544_writereg: [mii interface function] 11271 * 11272 * Write a PHY register on the GMII. 11273 */ 11274 static int 11275 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val) 11276 { 11277 struct wm_softc *sc = device_private(dev); 11278 int rv; 11279 11280 if (sc->phy.acquire(sc)) { 11281 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11282 return -1; 11283 } 11284 11285 rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val); 11286 sc->phy.release(sc); 11287 11288 return rv; 11289 } 11290 11291 static int 11292 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val) 11293 { 11294 struct wm_softc *sc = device_private(dev); 11295 int rv; 11296 11297 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 11298 switch (sc->sc_phytype) { 11299 case WMPHY_IGP: 11300 case WMPHY_IGP_2: 11301 case WMPHY_IGP_3: 11302 rv = wm_gmii_mdic_writereg(dev, phy, 11303 IGPHY_PAGE_SELECT, reg); 11304 if (rv != 0) 11305 return rv; 11306 break; 11307 default: 11308 #ifdef WM_DEBUG 11309 device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x", 11310 __func__, sc->sc_phytype, reg); 11311 #endif 11312 break; 11313 } 11314 } 11315 11316 return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11317 } 11318 11319 /* 11320 * wm_gmii_i80003_readreg: [mii interface function] 11321 * 11322 * Read a PHY register on the kumeran 11323 * This could be handled by the PHY layer if we didn't have to lock the 11324 * resource ... 11325 */ 11326 static int 11327 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val) 11328 { 11329 struct wm_softc *sc = device_private(dev); 11330 int page_select; 11331 uint16_t temp, temp2; 11332 int rv = 0; 11333 11334 if (phy != 1) /* Only one PHY on kumeran bus */ 11335 return -1; 11336 11337 if (sc->phy.acquire(sc)) { 11338 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11339 return -1; 11340 } 11341 11342 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) 11343 page_select = GG82563_PHY_PAGE_SELECT; 11344 else { 11345 /* 11346 * Use Alternative Page Select register to access registers 11347 * 30 and 31. 
11348 */ 11349 page_select = GG82563_PHY_PAGE_SELECT_ALT; 11350 } 11351 temp = reg >> GG82563_PAGE_SHIFT; 11352 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0) 11353 goto out; 11354 11355 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) { 11356 /* 11357 * Wait more 200us for a bug of the ready bit in the MDIC 11358 * register. 11359 */ 11360 delay(200); 11361 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2); 11362 if ((rv != 0) || (temp2 != temp)) { 11363 device_printf(dev, "%s failed\n", __func__); 11364 rv = -1; 11365 goto out; 11366 } 11367 delay(200); 11368 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val); 11369 delay(200); 11370 } else 11371 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val); 11372 11373 out: 11374 sc->phy.release(sc); 11375 return rv; 11376 } 11377 11378 /* 11379 * wm_gmii_i80003_writereg: [mii interface function] 11380 * 11381 * Write a PHY register on the kumeran. 11382 * This could be handled by the PHY layer if we didn't have to lock the 11383 * resource ... 11384 */ 11385 static int 11386 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val) 11387 { 11388 struct wm_softc *sc = device_private(dev); 11389 int page_select, rv; 11390 uint16_t temp, temp2; 11391 11392 if (phy != 1) /* Only one PHY on kumeran bus */ 11393 return -1; 11394 11395 if (sc->phy.acquire(sc)) { 11396 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11397 return -1; 11398 } 11399 11400 if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) 11401 page_select = GG82563_PHY_PAGE_SELECT; 11402 else { 11403 /* 11404 * Use Alternative Page Select register to access registers 11405 * 30 and 31. 11406 */ 11407 page_select = GG82563_PHY_PAGE_SELECT_ALT; 11408 } 11409 temp = (uint16_t)reg >> GG82563_PAGE_SHIFT; 11410 if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0) 11411 goto out; 11412 11413 if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) { 11414 /* 11415 * Wait more 200us for a bug of the ready bit in the MDIC 11416 * register. 11417 */ 11418 delay(200); 11419 rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2); 11420 if ((rv != 0) || (temp2 != temp)) { 11421 device_printf(dev, "%s failed\n", __func__); 11422 rv = -1; 11423 goto out; 11424 } 11425 delay(200); 11426 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11427 delay(200); 11428 } else 11429 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11430 11431 out: 11432 sc->phy.release(sc); 11433 return rv; 11434 } 11435 11436 /* 11437 * wm_gmii_bm_readreg: [mii interface function] 11438 * 11439 * Read a PHY register on the kumeran 11440 * This could be handled by the PHY layer if we didn't have to lock the 11441 * resource ... 11442 */ 11443 static int 11444 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val) 11445 { 11446 struct wm_softc *sc = device_private(dev); 11447 uint16_t page = reg >> BME1000_PAGE_SHIFT; 11448 int rv; 11449 11450 if (sc->phy.acquire(sc)) { 11451 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11452 return -1; 11453 } 11454 11455 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) 11456 phy = ((page >= 768) || ((page == 0) && (reg == 25)) 11457 || (reg == 31)) ? 
1 : phy; 11458 /* Page 800 works differently than the rest so it has its own func */ 11459 if (page == BM_WUC_PAGE) { 11460 rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false); 11461 goto release; 11462 } 11463 11464 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 11465 if ((phy == 1) && (sc->sc_type != WM_T_82574) 11466 && (sc->sc_type != WM_T_82583)) 11467 rv = wm_gmii_mdic_writereg(dev, phy, 11468 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); 11469 else 11470 rv = wm_gmii_mdic_writereg(dev, phy, 11471 BME1000_PHY_PAGE_SELECT, page); 11472 if (rv != 0) 11473 goto release; 11474 } 11475 11476 rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val); 11477 11478 release: 11479 sc->phy.release(sc); 11480 return rv; 11481 } 11482 11483 /* 11484 * wm_gmii_bm_writereg: [mii interface function] 11485 * 11486 * Write a PHY register on the kumeran. 11487 * This could be handled by the PHY layer if we didn't have to lock the 11488 * resource ... 11489 */ 11490 static int 11491 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val) 11492 { 11493 struct wm_softc *sc = device_private(dev); 11494 uint16_t page = reg >> BME1000_PAGE_SHIFT; 11495 int rv; 11496 11497 if (sc->phy.acquire(sc)) { 11498 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11499 return -1; 11500 } 11501 11502 if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583)) 11503 phy = ((page >= 768) || ((page == 0) && (reg == 25)) 11504 || (reg == 31)) ? 1 : phy; 11505 /* Page 800 works differently than the rest so it has its own func */ 11506 if (page == BM_WUC_PAGE) { 11507 rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false); 11508 goto release; 11509 } 11510 11511 if (reg > BME1000_MAX_MULTI_PAGE_REG) { 11512 if ((phy == 1) && (sc->sc_type != WM_T_82574) 11513 && (sc->sc_type != WM_T_82583)) 11514 rv = wm_gmii_mdic_writereg(dev, phy, 11515 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); 11516 else 11517 rv = wm_gmii_mdic_writereg(dev, phy, 11518 BME1000_PHY_PAGE_SELECT, page); 11519 if (rv != 0) 11520 goto release; 11521 } 11522 11523 rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val); 11524 11525 release: 11526 sc->phy.release(sc); 11527 return rv; 11528 } 11529 11530 /* 11531 * wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers 11532 * @dev: pointer to the HW structure 11533 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG 11534 * 11535 * Assumes semaphore already acquired and phy_reg points to a valid memory 11536 * address to store contents of the BM_WUC_ENABLE_REG register. 11537 */ 11538 static int 11539 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp) 11540 { 11541 #ifdef WM_DEBUG 11542 struct wm_softc *sc = device_private(dev); 11543 #endif 11544 uint16_t temp; 11545 int rv; 11546 11547 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 11548 device_xname(dev), __func__)); 11549 11550 if (!phy_regp) 11551 return -1; 11552 11553 /* All page select, port ctrl and wakeup registers use phy address 1 */ 11554 11555 /* Select Port Control Registers page */ 11556 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 11557 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT); 11558 if (rv != 0) 11559 return rv; 11560 11561 /* Read WUCE and save it */ 11562 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp); 11563 if (rv != 0) 11564 return rv; 11565 11566 /* Enable both PHY wakeup mode and Wakeup register page writes. 11567 * Prevent a power state change by disabling ME and Host PHY wakeup. 
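 * (BM_WUC_ENABLE_BIT is set below; BM_WUC_ME_WU_BIT and BM_WUC_HOST_WU_BIT
 * are cleared before the value is written back.)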
11568 */ 11569 temp = *phy_regp; 11570 temp |= BM_WUC_ENABLE_BIT; 11571 temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); 11572 11573 if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0) 11574 return rv; 11575 11576 /* Select Host Wakeup Registers page - caller now able to write 11577 * registers on the Wakeup registers page 11578 */ 11579 return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 11580 BM_WUC_PAGE << IGP3_PAGE_SHIFT); 11581 } 11582 11583 /* 11584 * wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs 11585 * @dev: pointer to the HW structure 11586 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG 11587 * 11588 * Restore BM_WUC_ENABLE_REG to its original value. 11589 * 11590 * Assumes semaphore already acquired and *phy_reg is the contents of the 11591 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by 11592 * caller. 11593 */ 11594 static int 11595 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp) 11596 { 11597 #ifdef WM_DEBUG 11598 struct wm_softc *sc = device_private(dev); 11599 #endif 11600 11601 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 11602 device_xname(dev), __func__)); 11603 11604 if (!phy_regp) 11605 return -1; 11606 11607 /* Select Port Control Registers page */ 11608 wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 11609 BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT); 11610 11611 /* Restore 769.17 to its original value */ 11612 wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp); 11613 11614 return 0; 11615 } 11616 11617 /* 11618 * wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register 11619 * @sc: pointer to the HW structure 11620 * @offset: register offset to be read or written 11621 * @val: pointer to the data to read or write 11622 * @rd: determines if operation is read or write 11623 * @page_set: BM_WUC_PAGE already set and access enabled 11624 * 11625 * Read the PHY register at offset and store the retrieved information in 11626 * data, or write data to PHY register at offset. Note the procedure to 11627 * access the PHY wakeup registers is different than reading the other PHY 11628 * registers. It works as such: 11629 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 11630 * 2) Set page to 800 for host (801 if we were manageability) 11631 * 3) Write the address using the address opcode (0x11) 11632 * 4) Read or write the data using the data opcode (0x12) 11633 * 5) Restore 769.17.2 to its original value 11634 * 11635 * Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and 11636 * step 5 is done by wm_disable_phy_wakeup_reg_access_bm(). 11637 * 11638 * Assumes semaphore is already acquired. When page_set==TRUE, assumes 11639 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack 11640 * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()). 
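 *
 * The offset argument encodes both the page and the register number; it is
 * split with BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() below.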
11641 */ 11642 static int 11643 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd, 11644 bool page_set) 11645 { 11646 struct wm_softc *sc = device_private(dev); 11647 uint16_t regnum = BM_PHY_REG_NUM(offset); 11648 uint16_t page = BM_PHY_REG_PAGE(offset); 11649 uint16_t wuce; 11650 int rv = 0; 11651 11652 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 11653 device_xname(dev), __func__)); 11654 /* XXX Gig must be disabled for MDIO accesses to page 800 */ 11655 if ((sc->sc_type == WM_T_PCH) 11656 && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) { 11657 device_printf(dev, 11658 "Attempting to access page %d while gig enabled.\n", page); 11659 } 11660 11661 if (!page_set) { 11662 /* Enable access to PHY wakeup registers */ 11663 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce); 11664 if (rv != 0) { 11665 device_printf(dev, 11666 "%s: Could not enable PHY wakeup reg access\n", 11667 __func__); 11668 return rv; 11669 } 11670 } 11671 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n", 11672 device_xname(sc->sc_dev), __func__, page, regnum)); 11673 11674 /* 11675 * 2) Access PHY wakeup register. 11676 * See wm_access_phy_wakeup_reg_bm. 11677 */ 11678 11679 /* Write the Wakeup register page offset value using opcode 0x11 */ 11680 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum); 11681 if (rv != 0) 11682 return rv; 11683 11684 if (rd) { 11685 /* Read the Wakeup register page value using opcode 0x12 */ 11686 rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val); 11687 } else { 11688 /* Write the Wakeup register page value using opcode 0x12 */ 11689 rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val); 11690 } 11691 if (rv != 0) 11692 return rv; 11693 11694 if (!page_set) 11695 rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); 11696 11697 return rv; 11698 } 11699 11700 /* 11701 * wm_gmii_hv_readreg: [mii interface function] 11702 * 11703 * Read a PHY register on the kumeran 11704 * This could be handled by the PHY layer if we didn't have to lock the 11705 * resource ... 11706 */ 11707 static int 11708 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val) 11709 { 11710 struct wm_softc *sc = device_private(dev); 11711 int rv; 11712 11713 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 11714 device_xname(dev), __func__)); 11715 if (sc->phy.acquire(sc)) { 11716 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11717 return -1; 11718 } 11719 11720 rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val); 11721 sc->phy.release(sc); 11722 return rv; 11723 } 11724 11725 static int 11726 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val) 11727 { 11728 uint16_t page = BM_PHY_REG_PAGE(reg); 11729 uint16_t regnum = BM_PHY_REG_NUM(reg); 11730 int rv; 11731 11732 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy; 11733 11734 /* Page 800 works differently than the rest so it has its own func */ 11735 if (page == BM_WUC_PAGE) 11736 return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false); 11737 11738 /* 11739 * Lower than page 768 works differently than the rest so it has its 11740 * own func 11741 */ 11742 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { 11743 device_printf(dev, "gmii_hv_readreg!!!\n"); 11744 return -1; 11745 } 11746 11747 /* 11748 * XXX I21[789] documents say that the SMBus Address register is at 11749 * PHY address 01, Page 0 (not 768), Register 26. 
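 * (HV_INTC_FC_PAGE_START is remapped to page 0 just below.)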
11750 */ 11751 if (page == HV_INTC_FC_PAGE_START) 11752 page = 0; 11753 11754 if (regnum > BME1000_MAX_MULTI_PAGE_REG) { 11755 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 11756 page << BME1000_PAGE_SHIFT); 11757 if (rv != 0) 11758 return rv; 11759 } 11760 11761 return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val); 11762 } 11763 11764 /* 11765 * wm_gmii_hv_writereg: [mii interface function] 11766 * 11767 * Write a PHY register on the kumeran. 11768 * This could be handled by the PHY layer if we didn't have to lock the 11769 * resource ... 11770 */ 11771 static int 11772 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val) 11773 { 11774 struct wm_softc *sc = device_private(dev); 11775 int rv; 11776 11777 DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n", 11778 device_xname(dev), __func__)); 11779 11780 if (sc->phy.acquire(sc)) { 11781 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11782 return -1; 11783 } 11784 11785 rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val); 11786 sc->phy.release(sc); 11787 11788 return rv; 11789 } 11790 11791 static int 11792 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val) 11793 { 11794 struct wm_softc *sc = device_private(dev); 11795 uint16_t page = BM_PHY_REG_PAGE(reg); 11796 uint16_t regnum = BM_PHY_REG_NUM(reg); 11797 int rv; 11798 11799 phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy; 11800 11801 /* Page 800 works differently than the rest so it has its own func */ 11802 if (page == BM_WUC_PAGE) 11803 return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, 11804 false); 11805 11806 /* 11807 * Lower than page 768 works differently than the rest so it has its 11808 * own func 11809 */ 11810 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) { 11811 device_printf(dev, "gmii_hv_writereg!!!\n"); 11812 return -1; 11813 } 11814 11815 { 11816 /* 11817 * XXX I21[789] documents say that the SMBus Address register 11818 * is at PHY address 01, Page 0 (not 768), Register 26. 11819 */ 11820 if (page == HV_INTC_FC_PAGE_START) 11821 page = 0; 11822 11823 /* 11824 * XXX Workaround MDIO accesses being disabled after entering 11825 * IEEE Power Down (whenever bit 11 of the PHY control 11826 * register is set) 11827 */ 11828 if (sc->sc_phytype == WMPHY_82578) { 11829 struct mii_softc *child; 11830 11831 child = LIST_FIRST(&sc->sc_mii.mii_phys); 11832 if ((child != NULL) && (child->mii_mpd_rev >= 1) 11833 && (phy == 2) && ((regnum & MII_ADDRMASK) == 0) 11834 && ((val & (1 << 11)) != 0)) { 11835 device_printf(dev, "XXX need workaround\n"); 11836 } 11837 } 11838 11839 if (regnum > BME1000_MAX_MULTI_PAGE_REG) { 11840 rv = wm_gmii_mdic_writereg(dev, 1, 11841 IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT); 11842 if (rv != 0) 11843 return rv; 11844 } 11845 } 11846 11847 return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val); 11848 } 11849 11850 /* 11851 * wm_gmii_82580_readreg: [mii interface function] 11852 * 11853 * Read a PHY register on the 82580 and I350. 11854 * This could be handled by the PHY layer if we didn't have to lock the 11855 * resource ... 
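 * No page select is needed here; under DIAGNOSTIC, register addresses above
 * MII_ADDRMASK are reported and masked before the MDIC access.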
11856 */ 11857 static int 11858 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val) 11859 { 11860 struct wm_softc *sc = device_private(dev); 11861 int rv; 11862 11863 if (sc->phy.acquire(sc) != 0) { 11864 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11865 return -1; 11866 } 11867 11868 #ifdef DIAGNOSTIC 11869 if (reg > MII_ADDRMASK) { 11870 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", 11871 __func__, sc->sc_phytype, reg); 11872 reg &= MII_ADDRMASK; 11873 } 11874 #endif 11875 rv = wm_gmii_mdic_readreg(dev, phy, reg, val); 11876 11877 sc->phy.release(sc); 11878 return rv; 11879 } 11880 11881 /* 11882 * wm_gmii_82580_writereg: [mii interface function] 11883 * 11884 * Write a PHY register on the 82580 and I350. 11885 * This could be handled by the PHY layer if we didn't have to lock the 11886 * resource ... 11887 */ 11888 static int 11889 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val) 11890 { 11891 struct wm_softc *sc = device_private(dev); 11892 int rv; 11893 11894 if (sc->phy.acquire(sc) != 0) { 11895 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11896 return -1; 11897 } 11898 11899 #ifdef DIAGNOSTIC 11900 if (reg > MII_ADDRMASK) { 11901 device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n", 11902 __func__, sc->sc_phytype, reg); 11903 reg &= MII_ADDRMASK; 11904 } 11905 #endif 11906 rv = wm_gmii_mdic_writereg(dev, phy, reg, val); 11907 11908 sc->phy.release(sc); 11909 return rv; 11910 } 11911 11912 /* 11913 * wm_gmii_gs40g_readreg: [mii interface function] 11914 * 11915 * Read a PHY register on the I210 and I211. 11916 * This could be handled by the PHY layer if we didn't have to lock the 11917 * resource ... 11918 */ 11919 static int 11920 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val) 11921 { 11922 struct wm_softc *sc = device_private(dev); 11923 int page, offset; 11924 int rv; 11925 11926 /* Acquire semaphore */ 11927 if (sc->phy.acquire(sc)) { 11928 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11929 return -1; 11930 } 11931 11932 /* Page select */ 11933 page = reg >> GS40G_PAGE_SHIFT; 11934 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page); 11935 if (rv != 0) 11936 goto release; 11937 11938 /* Read reg */ 11939 offset = reg & GS40G_OFFSET_MASK; 11940 rv = wm_gmii_mdic_readreg(dev, phy, offset, val); 11941 11942 release: 11943 sc->phy.release(sc); 11944 return rv; 11945 } 11946 11947 /* 11948 * wm_gmii_gs40g_writereg: [mii interface function] 11949 * 11950 * Write a PHY register on the I210 and I211. 11951 * This could be handled by the PHY layer if we didn't have to lock the 11952 * resource ...
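 * As in the read path, the register argument carries the page in its upper
 * bits (GS40G_PAGE_SHIFT) and the in-page offset in its low bits
 * (GS40G_OFFSET_MASK).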
11953 */ 11954 static int 11955 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val) 11956 { 11957 struct wm_softc *sc = device_private(dev); 11958 uint16_t page; 11959 int offset, rv; 11960 11961 /* Acquire semaphore */ 11962 if (sc->phy.acquire(sc)) { 11963 device_printf(dev, "%s: failed to get semaphore\n", __func__); 11964 return -1; 11965 } 11966 11967 /* Page select */ 11968 page = reg >> GS40G_PAGE_SHIFT; 11969 rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page); 11970 if (rv != 0) 11971 goto release; 11972 11973 /* Write reg */ 11974 offset = reg & GS40G_OFFSET_MASK; 11975 rv = wm_gmii_mdic_writereg(dev, phy, offset, val); 11976 11977 release: 11978 /* Release semaphore */ 11979 sc->phy.release(sc); 11980 return rv; 11981 } 11982 11983 /* 11984 * wm_gmii_statchg: [mii interface function] 11985 * 11986 * Callback from MII layer when media changes. 11987 */ 11988 static void 11989 wm_gmii_statchg(struct ifnet *ifp) 11990 { 11991 struct wm_softc *sc = ifp->if_softc; 11992 struct mii_data *mii = &sc->sc_mii; 11993 11994 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE); 11995 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 11996 sc->sc_fcrtl &= ~FCRTL_XONE; 11997 11998 /* Get flow control negotiation result. */ 11999 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 12000 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 12001 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 12002 mii->mii_media_active &= ~IFM_ETH_FMASK; 12003 } 12004 12005 if (sc->sc_flowflags & IFM_FLOW) { 12006 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) { 12007 sc->sc_ctrl |= CTRL_TFCE; 12008 sc->sc_fcrtl |= FCRTL_XONE; 12009 } 12010 if (sc->sc_flowflags & IFM_ETH_RXPAUSE) 12011 sc->sc_ctrl |= CTRL_RFCE; 12012 } 12013 12014 if (mii->mii_media_active & IFM_FDX) { 12015 DPRINTF(sc, WM_DEBUG_LINK, 12016 ("%s: LINK: statchg: FDX\n", ifp->if_xname)); 12017 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 12018 } else { 12019 DPRINTF(sc, WM_DEBUG_LINK, 12020 ("%s: LINK: statchg: HDX\n", ifp->if_xname)); 12021 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 12022 } 12023 12024 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12025 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 12026 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 
WMREG_OLD_FCRTL 12027 : WMREG_FCRTL, sc->sc_fcrtl); 12028 if (sc->sc_type == WM_T_80003) { 12029 switch (IFM_SUBTYPE(mii->mii_media_active)) { 12030 case IFM_1000_T: 12031 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, 12032 KUMCTRLSTA_HD_CTRL_1000_DEFAULT); 12033 sc->sc_tipg = TIPG_1000T_80003_DFLT; 12034 break; 12035 default: 12036 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, 12037 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT); 12038 sc->sc_tipg = TIPG_10_100_80003_DFLT; 12039 break; 12040 } 12041 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg); 12042 } 12043 } 12044 12045 /* kumeran related (80003, ICH* and PCH*) */ 12046 12047 /* 12048 * wm_kmrn_readreg: 12049 * 12050 * Read a kumeran register 12051 */ 12052 static int 12053 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val) 12054 { 12055 int rv; 12056 12057 if (sc->sc_type == WM_T_80003) 12058 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 12059 else 12060 rv = sc->phy.acquire(sc); 12061 if (rv != 0) { 12062 device_printf(sc->sc_dev, "%s: failed to get semaphore\n", 12063 __func__); 12064 return rv; 12065 } 12066 12067 rv = wm_kmrn_readreg_locked(sc, reg, val); 12068 12069 if (sc->sc_type == WM_T_80003) 12070 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 12071 else 12072 sc->phy.release(sc); 12073 12074 return rv; 12075 } 12076 12077 static int 12078 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val) 12079 { 12080 12081 CSR_WRITE(sc, WMREG_KUMCTRLSTA, 12082 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | 12083 KUMCTRLSTA_REN); 12084 CSR_WRITE_FLUSH(sc); 12085 delay(2); 12086 12087 *val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK; 12088 12089 return 0; 12090 } 12091 12092 /* 12093 * wm_kmrn_writereg: 12094 * 12095 * Write a kumeran register 12096 */ 12097 static int 12098 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val) 12099 { 12100 int rv; 12101 12102 if (sc->sc_type == WM_T_80003) 12103 rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 12104 else 12105 rv = sc->phy.acquire(sc); 12106 if (rv != 0) { 12107 device_printf(sc->sc_dev, "%s: failed to get semaphore\n", 12108 __func__); 12109 return rv; 12110 } 12111 12112 rv = wm_kmrn_writereg_locked(sc, reg, val); 12113 12114 if (sc->sc_type == WM_T_80003) 12115 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM); 12116 else 12117 sc->phy.release(sc); 12118 12119 return rv; 12120 } 12121 12122 static int 12123 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val) 12124 { 12125 12126 CSR_WRITE(sc, WMREG_KUMCTRLSTA, 12127 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val); 12128 12129 return 0; 12130 } 12131 12132 /* 12133 * EMI register related (82579, WMPHY_I217(PCH2 and newer)) 12134 * This access method is different from IEEE MMD. 
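 * An EMI access is a two step sequence: the EMI address is written to
 * I82579_EMI_ADDR and the data is then read from or written to
 * I82579_EMI_DATA, both at PHY address 2.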
12135 */ 12136 static int 12137 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd) 12138 { 12139 struct wm_softc *sc = device_private(dev); 12140 int rv; 12141 12142 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg); 12143 if (rv != 0) 12144 return rv; 12145 12146 if (rd) 12147 rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val); 12148 else 12149 rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val); 12150 return rv; 12151 } 12152 12153 static int 12154 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val) 12155 { 12156 12157 return wm_access_emi_reg_locked(dev, reg, val, true); 12158 } 12159 12160 static int 12161 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val) 12162 { 12163 12164 return wm_access_emi_reg_locked(dev, reg, &val, false); 12165 } 12166 12167 /* SGMII related */ 12168 12169 /* 12170 * wm_sgmii_uses_mdio 12171 * 12172 * Check whether the transaction is to the internal PHY or the external 12173 * MDIO interface. Return true if it's MDIO. 12174 */ 12175 static bool 12176 wm_sgmii_uses_mdio(struct wm_softc *sc) 12177 { 12178 uint32_t reg; 12179 bool ismdio = false; 12180 12181 switch (sc->sc_type) { 12182 case WM_T_82575: 12183 case WM_T_82576: 12184 reg = CSR_READ(sc, WMREG_MDIC); 12185 ismdio = ((reg & MDIC_DEST) != 0); 12186 break; 12187 case WM_T_82580: 12188 case WM_T_I350: 12189 case WM_T_I354: 12190 case WM_T_I210: 12191 case WM_T_I211: 12192 reg = CSR_READ(sc, WMREG_MDICNFG); 12193 ismdio = ((reg & MDICNFG_DEST) != 0); 12194 break; 12195 default: 12196 break; 12197 } 12198 12199 return ismdio; 12200 } 12201 12202 /* Setup internal SGMII PHY for SFP */ 12203 static void 12204 wm_sgmii_sfp_preconfig(struct wm_softc *sc) 12205 { 12206 uint16_t id1, id2, phyreg; 12207 int i, rv; 12208 12209 if (((sc->sc_flags & WM_F_SGMII) == 0) 12210 || ((sc->sc_flags & WM_F_SFP) == 0)) 12211 return; 12212 12213 for (i = 0; i < MII_NPHY; i++) { 12214 sc->phy.no_errprint = true; 12215 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1); 12216 if (rv != 0) 12217 continue; 12218 rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2); 12219 if (rv != 0) 12220 continue; 12221 if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL) 12222 continue; 12223 sc->phy.no_errprint = false; 12224 12225 sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg); 12226 phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE); 12227 phyreg |= ESSR_SGMII_WOC_COPPER; 12228 sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg); 12229 break; 12230 } 12231 12232 } 12233 12234 /* 12235 * wm_sgmii_readreg: [mii interface function] 12236 * 12237 * Read a PHY register on the SGMII 12238 * This could be handled by the PHY layer if we didn't have to lock the 12239 * resource ... 
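 * The access goes through the I2CCMD register; the two data bytes come back
 * in I2C byte order and are swapped before being returned (see the locked
 * helper below).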
12240 */ 12241 static int 12242 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val) 12243 { 12244 struct wm_softc *sc = device_private(dev); 12245 int rv; 12246 12247 if (sc->phy.acquire(sc)) { 12248 device_printf(dev, "%s: failed to get semaphore\n", __func__); 12249 return -1; 12250 } 12251 12252 rv = wm_sgmii_readreg_locked(dev, phy, reg, val); 12253 12254 sc->phy.release(sc); 12255 return rv; 12256 } 12257 12258 static int 12259 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val) 12260 { 12261 struct wm_softc *sc = device_private(dev); 12262 uint32_t i2ccmd; 12263 int i, rv = 0; 12264 12265 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) 12266 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ; 12267 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 12268 12269 /* Poll the ready bit */ 12270 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 12271 delay(50); 12272 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 12273 if (i2ccmd & I2CCMD_READY) 12274 break; 12275 } 12276 if ((i2ccmd & I2CCMD_READY) == 0) { 12277 device_printf(dev, "I2CCMD Read did not complete\n"); 12278 rv = ETIMEDOUT; 12279 } 12280 if ((i2ccmd & I2CCMD_ERROR) != 0) { 12281 if (!sc->phy.no_errprint) 12282 device_printf(dev, "I2CCMD Error bit set\n"); 12283 rv = EIO; 12284 } 12285 12286 *val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00); 12287 12288 return rv; 12289 } 12290 12291 /* 12292 * wm_sgmii_writereg: [mii interface function] 12293 * 12294 * Write a PHY register on the SGMII. 12295 * This could be handled by the PHY layer if we didn't have to lock the 12296 * resource ... 12297 */ 12298 static int 12299 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val) 12300 { 12301 struct wm_softc *sc = device_private(dev); 12302 int rv; 12303 12304 if (sc->phy.acquire(sc) != 0) { 12305 device_printf(dev, "%s: failed to get semaphore\n", __func__); 12306 return -1; 12307 } 12308 12309 rv = wm_sgmii_writereg_locked(dev, phy, reg, val); 12310 12311 sc->phy.release(sc); 12312 12313 return rv; 12314 } 12315 12316 static int 12317 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val) 12318 { 12319 struct wm_softc *sc = device_private(dev); 12320 uint32_t i2ccmd; 12321 uint16_t swapdata; 12322 int rv = 0; 12323 int i; 12324 12325 /* Swap the data bytes for the I2C interface */ 12326 swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00); 12327 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT) 12328 | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata; 12329 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 12330 12331 /* Poll the ready bit */ 12332 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 12333 delay(50); 12334 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 12335 if (i2ccmd & I2CCMD_READY) 12336 break; 12337 } 12338 if ((i2ccmd & I2CCMD_READY) == 0) { 12339 device_printf(dev, "I2CCMD Write did not complete\n"); 12340 rv = ETIMEDOUT; 12341 } 12342 if ((i2ccmd & I2CCMD_ERROR) != 0) { 12343 device_printf(dev, "I2CCMD Error bit set\n"); 12344 rv = EIO; 12345 } 12346 12347 return rv; 12348 } 12349 12350 /* TBI related */ 12351 12352 static bool 12353 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl) 12354 { 12355 bool sig; 12356 12357 sig = ctrl & CTRL_SWDPIN(1); 12358 12359 /* 12360 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics 12361 * detect a signal, 1 if they don't. 
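 * The raw value is therefore inverted for those chips below.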
12362 */ 12363 if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544)) 12364 sig = !sig; 12365 12366 return sig; 12367 } 12368 12369 /* 12370 * wm_tbi_mediainit: 12371 * 12372 * Initialize media for use on 1000BASE-X devices. 12373 */ 12374 static void 12375 wm_tbi_mediainit(struct wm_softc *sc) 12376 { 12377 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 12378 const char *sep = ""; 12379 12380 if (sc->sc_type < WM_T_82543) 12381 sc->sc_tipg = TIPG_WM_DFLT; 12382 else 12383 sc->sc_tipg = TIPG_LG_DFLT; 12384 12385 sc->sc_tbi_serdes_anegticks = 5; 12386 12387 /* Initialize our media structures */ 12388 sc->sc_mii.mii_ifp = ifp; 12389 sc->sc_ethercom.ec_mii = &sc->sc_mii; 12390 12391 ifp->if_baudrate = IF_Gbps(1); 12392 if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) 12393 && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) { 12394 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK, 12395 wm_serdes_mediachange, wm_serdes_mediastatus, 12396 sc->sc_core_lock); 12397 } else { 12398 ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK, 12399 wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock); 12400 } 12401 12402 /* 12403 * SWD Pins: 12404 * 12405 * 0 = Link LED (output) 12406 * 1 = Loss Of Signal (input) 12407 */ 12408 sc->sc_ctrl |= CTRL_SWDPIO(0); 12409 12410 /* XXX Perhaps this is only for TBI */ 12411 if (sc->sc_mediatype != WM_MEDIATYPE_SERDES) 12412 sc->sc_ctrl &= ~CTRL_SWDPIO(1); 12413 12414 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) 12415 sc->sc_ctrl &= ~CTRL_LRST; 12416 12417 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12418 12419 #define ADD(ss, mm, dd) \ 12420 do { \ 12421 aprint_normal("%s%s", sep, ss); \ 12422 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \ 12423 sep = ", "; \ 12424 } while (/*CONSTCOND*/0) 12425 12426 aprint_normal_dev(sc->sc_dev, ""); 12427 12428 if (sc->sc_type == WM_T_I354) { 12429 uint32_t status; 12430 12431 status = CSR_READ(sc, WMREG_STATUS); 12432 if (((status & STATUS_2P5_SKU) != 0) 12433 && ((status & STATUS_2P5_SKU_OVER) == 0)) { 12434 ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD); 12435 } else 12436 ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD); 12437 } else if (sc->sc_type == WM_T_82545) { 12438 /* Only 82545 is LX (XXX except SFP) */ 12439 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD); 12440 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD); 12441 } else if (sc->sc_sfptype != 0) { 12442 /* XXX wm(4) fiber/serdes don't use ifm_data */ 12443 switch (sc->sc_sfptype) { 12444 default: 12445 case SFF_SFP_ETH_FLAGS_1000SX: 12446 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 12447 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD); 12448 break; 12449 case SFF_SFP_ETH_FLAGS_1000LX: 12450 ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD); 12451 ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD); 12452 break; 12453 case SFF_SFP_ETH_FLAGS_1000CX: 12454 ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD); 12455 ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD); 12456 break; 12457 case SFF_SFP_ETH_FLAGS_1000T: 12458 ADD("1000baseT", IFM_1000_T, 0); 12459 ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0); 12460 break; 12461 case SFF_SFP_ETH_FLAGS_100FX: 12462 ADD("100baseFX", IFM_100_FX, ANAR_TX); 12463 ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD); 12464 break; 12465 } 12466 } else { 12467 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD); 12468 ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD); 12469 } 12470 ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD); 12471 aprint_normal("\n"); 12472 12473 #undef 
ADD 12474 12475 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO); 12476 } 12477 12478 /* 12479 * wm_tbi_mediachange: [ifmedia interface function] 12480 * 12481 * Set hardware to newly-selected media on a 1000BASE-X device. 12482 */ 12483 static int 12484 wm_tbi_mediachange(struct ifnet *ifp) 12485 { 12486 struct wm_softc *sc = ifp->if_softc; 12487 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 12488 uint32_t status, ctrl; 12489 bool signal; 12490 int i; 12491 12492 KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER); 12493 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) { 12494 /* XXX need some work for >= 82571 and < 82575 */ 12495 if (sc->sc_type < WM_T_82575) 12496 return 0; 12497 } 12498 12499 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572) 12500 || (sc->sc_type >= WM_T_82575)) 12501 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK); 12502 12503 sc->sc_ctrl &= ~CTRL_LRST; 12504 sc->sc_txcw = TXCW_ANE; 12505 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 12506 sc->sc_txcw |= TXCW_FD | TXCW_HD; 12507 else if (ife->ifm_media & IFM_FDX) 12508 sc->sc_txcw |= TXCW_FD; 12509 else 12510 sc->sc_txcw |= TXCW_HD; 12511 12512 if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0) 12513 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE; 12514 12515 DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n", 12516 device_xname(sc->sc_dev), sc->sc_txcw)); 12517 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 12518 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12519 CSR_WRITE_FLUSH(sc); 12520 delay(1000); 12521 12522 ctrl = CSR_READ(sc, WMREG_CTRL); 12523 signal = wm_tbi_havesignal(sc, ctrl); 12524 12525 DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev), 12526 signal)); 12527 12528 if (signal) { 12529 /* Have signal; wait for the link to come up. */ 12530 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) { 12531 delay(10000); 12532 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU) 12533 break; 12534 } 12535 12536 DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n", 12537 device_xname(sc->sc_dev), i)); 12538 12539 status = CSR_READ(sc, WMREG_STATUS); 12540 DPRINTF(sc, WM_DEBUG_LINK, 12541 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n", 12542 device_xname(sc->sc_dev), status, (uint32_t)STATUS_LU)); 12543 if (status & STATUS_LU) { 12544 /* Link is up. */ 12545 DPRINTF(sc, WM_DEBUG_LINK, 12546 ("%s: LINK: set media -> link up %s\n", 12547 device_xname(sc->sc_dev), 12548 (status & STATUS_FD) ? "FDX" : "HDX")); 12549 12550 /* 12551 * NOTE: CTRL will update TFCE and RFCE automatically, 12552 * so we should update sc->sc_ctrl 12553 */ 12554 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL); 12555 sc->sc_tctl &= ~TCTL_COLD(0x3ff); 12556 sc->sc_fcrtl &= ~FCRTL_XONE; 12557 if (status & STATUS_FD) 12558 sc->sc_tctl |= 12559 TCTL_COLD(TX_COLLISION_DISTANCE_FDX); 12560 else 12561 sc->sc_tctl |= 12562 TCTL_COLD(TX_COLLISION_DISTANCE_HDX); 12563 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE) 12564 sc->sc_fcrtl |= FCRTL_XONE; 12565 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl); 12566 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? 12567 WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl); 12568 sc->sc_tbi_linkup = 1; 12569 } else { 12570 if (i == WM_LINKUP_TIMEOUT) 12571 wm_check_for_link(sc); 12572 /* Link is down. 
*/ 12573 DPRINTF(sc, WM_DEBUG_LINK, 12574 ("%s: LINK: set media -> link down\n", 12575 device_xname(sc->sc_dev))); 12576 sc->sc_tbi_linkup = 0; 12577 } 12578 } else { 12579 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n", 12580 device_xname(sc->sc_dev))); 12581 sc->sc_tbi_linkup = 0; 12582 } 12583 12584 wm_tbi_serdes_set_linkled(sc); 12585 12586 return 0; 12587 } 12588 12589 /* 12590 * wm_tbi_mediastatus: [ifmedia interface function] 12591 * 12592 * Get the current interface media status on a 1000BASE-X device. 12593 */ 12594 static void 12595 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 12596 { 12597 struct wm_softc *sc = ifp->if_softc; 12598 uint32_t ctrl, status; 12599 12600 ifmr->ifm_status = IFM_AVALID; 12601 ifmr->ifm_active = IFM_ETHER; 12602 12603 status = CSR_READ(sc, WMREG_STATUS); 12604 if ((status & STATUS_LU) == 0) { 12605 ifmr->ifm_active |= IFM_NONE; 12606 return; 12607 } 12608 12609 ifmr->ifm_status |= IFM_ACTIVE; 12610 /* Only 82545 is LX */ 12611 if (sc->sc_type == WM_T_82545) 12612 ifmr->ifm_active |= IFM_1000_LX; 12613 else 12614 ifmr->ifm_active |= IFM_1000_SX; 12615 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD) 12616 ifmr->ifm_active |= IFM_FDX; 12617 else 12618 ifmr->ifm_active |= IFM_HDX; 12619 ctrl = CSR_READ(sc, WMREG_CTRL); 12620 if (ctrl & CTRL_RFCE) 12621 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 12622 if (ctrl & CTRL_TFCE) 12623 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 12624 } 12625 12626 /* XXX TBI only */ 12627 static int 12628 wm_check_for_link(struct wm_softc *sc) 12629 { 12630 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur; 12631 uint32_t rxcw; 12632 uint32_t ctrl; 12633 uint32_t status; 12634 bool signal; 12635 12636 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n", 12637 device_xname(sc->sc_dev), __func__)); 12638 12639 if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) { 12640 /* XXX need some work for >= 82571 */ 12641 if (sc->sc_type >= WM_T_82571) { 12642 sc->sc_tbi_linkup = 1; 12643 return 0; 12644 } 12645 } 12646 12647 rxcw = CSR_READ(sc, WMREG_RXCW); 12648 ctrl = CSR_READ(sc, WMREG_CTRL); 12649 status = CSR_READ(sc, WMREG_STATUS); 12650 signal = wm_tbi_havesignal(sc, ctrl); 12651 12652 DPRINTF(sc, WM_DEBUG_LINK, 12653 ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n", 12654 device_xname(sc->sc_dev), __func__, signal, 12655 ((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0))); 12656 12657 /* 12658 * SWDPIN LU RXCW 12659 * 0 0 0 12660 * 0 0 1 (should not happen) 12661 * 0 1 0 (should not happen) 12662 * 0 1 1 (should not happen) 12663 * 1 0 0 Disable autonego and force linkup 12664 * 1 0 1 got /C/ but not linkup yet 12665 * 1 1 0 (linkup) 12666 * 1 1 1 If IFM_AUTO, back to autonego 12667 * 12668 */ 12669 if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) { 12670 DPRINTF(sc, WM_DEBUG_LINK, 12671 ("%s: %s: force linkup and fullduplex\n", 12672 device_xname(sc->sc_dev), __func__)); 12673 sc->sc_tbi_linkup = 0; 12674 /* Disable auto-negotiation in the TXCW register */ 12675 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE)); 12676 12677 /* 12678 * Force link-up and also force full-duplex. 
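 * (CTRL_SLU and CTRL_FD are set below.)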
12679 * 12680 * NOTE: TFCE and RFCE in CTRL are updated automatically, 12681 * so we should update sc->sc_ctrl 12682 */ 12683 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD; 12684 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12685 } else if (((status & STATUS_LU) != 0) 12686 && ((rxcw & RXCW_C) != 0) 12687 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) { 12688 sc->sc_tbi_linkup = 1; 12689 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n", 12690 device_xname(sc->sc_dev), 12691 __func__)); 12692 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 12693 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU)); 12694 } else if (signal && ((rxcw & RXCW_C) != 0)) { 12695 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/", 12696 device_xname(sc->sc_dev), __func__)); 12697 } else { 12698 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n", 12699 device_xname(sc->sc_dev), __func__, rxcw, ctrl, 12700 status)); 12701 } 12702 12703 return 0; 12704 } 12705 12706 /* 12707 * wm_tbi_tick: 12708 * 12709 * Check the link on TBI devices. 12710 * This function acts as mii_tick(). 12711 */ 12712 static void 12713 wm_tbi_tick(struct wm_softc *sc) 12714 { 12715 struct mii_data *mii = &sc->sc_mii; 12716 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 12717 uint32_t status; 12718 12719 KASSERT(WM_CORE_LOCKED(sc)); 12720 12721 status = CSR_READ(sc, WMREG_STATUS); 12722 12723 /* XXX is this needed? */ 12724 (void)CSR_READ(sc, WMREG_RXCW); 12725 (void)CSR_READ(sc, WMREG_CTRL); 12726 12727 /* set link status */ 12728 if ((status & STATUS_LU) == 0) { 12729 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n", 12730 device_xname(sc->sc_dev))); 12731 sc->sc_tbi_linkup = 0; 12732 } else if (sc->sc_tbi_linkup == 0) { 12733 DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n", 12734 device_xname(sc->sc_dev), 12735 (status & STATUS_FD) ?
"FDX" : "HDX")); 12736 sc->sc_tbi_linkup = 1; 12737 sc->sc_tbi_serdes_ticks = 0; 12738 } 12739 12740 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0) 12741 goto setled; 12742 12743 if ((status & STATUS_LU) == 0) { 12744 sc->sc_tbi_linkup = 0; 12745 /* If the timer expired, retry autonegotiation */ 12746 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 12747 && (++sc->sc_tbi_serdes_ticks 12748 >= sc->sc_tbi_serdes_anegticks)) { 12749 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n", 12750 device_xname(sc->sc_dev), __func__)); 12751 sc->sc_tbi_serdes_ticks = 0; 12752 /* 12753 * Reset the link, and let autonegotiation do 12754 * its thing 12755 */ 12756 sc->sc_ctrl |= CTRL_LRST; 12757 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12758 CSR_WRITE_FLUSH(sc); 12759 delay(1000); 12760 sc->sc_ctrl &= ~CTRL_LRST; 12761 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12762 CSR_WRITE_FLUSH(sc); 12763 delay(1000); 12764 CSR_WRITE(sc, WMREG_TXCW, 12765 sc->sc_txcw & ~TXCW_ANE); 12766 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw); 12767 } 12768 } 12769 12770 setled: 12771 wm_tbi_serdes_set_linkled(sc); 12772 } 12773 12774 /* SERDES related */ 12775 static void 12776 wm_serdes_power_up_link_82575(struct wm_softc *sc) 12777 { 12778 uint32_t reg; 12779 12780 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES) 12781 && ((sc->sc_flags & WM_F_SGMII) == 0)) 12782 return; 12783 12784 /* Enable PCS to turn on link */ 12785 reg = CSR_READ(sc, WMREG_PCS_CFG); 12786 reg |= PCS_CFG_PCS_EN; 12787 CSR_WRITE(sc, WMREG_PCS_CFG, reg); 12788 12789 /* Power up the laser */ 12790 reg = CSR_READ(sc, WMREG_CTRL_EXT); 12791 reg &= ~CTRL_EXT_SWDPIN(3); 12792 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 12793 12794 /* Flush the write to verify completion */ 12795 CSR_WRITE_FLUSH(sc); 12796 delay(1000); 12797 } 12798 12799 static int 12800 wm_serdes_mediachange(struct ifnet *ifp) 12801 { 12802 struct wm_softc *sc = ifp->if_softc; 12803 bool pcs_autoneg = true; /* XXX */ 12804 uint32_t ctrl_ext, pcs_lctl, reg; 12805 12806 if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES) 12807 && ((sc->sc_flags & WM_F_SGMII) == 0)) 12808 return 0; 12809 12810 /* XXX Currently, this function is not called on 8257[12] */ 12811 if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572) 12812 || (sc->sc_type >= WM_T_82575)) 12813 CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK); 12814 12815 /* Power on the sfp cage if present */ 12816 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 12817 ctrl_ext &= ~CTRL_EXT_SWDPIN(3); 12818 ctrl_ext |= CTRL_EXT_I2C_ENA; 12819 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 12820 12821 sc->sc_ctrl |= CTRL_SLU; 12822 12823 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) { 12824 sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1); 12825 12826 reg = CSR_READ(sc, WMREG_CONNSW); 12827 reg |= CONNSW_ENRGSRC; 12828 CSR_WRITE(sc, WMREG_CONNSW, reg); 12829 } 12830 12831 pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL); 12832 switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) { 12833 case CTRL_EXT_LINK_MODE_SGMII: 12834 /* SGMII mode lets the phy handle forcing speed/duplex */ 12835 pcs_autoneg = true; 12836 /* Autoneg time out should be disabled for SGMII mode */ 12837 pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT; 12838 break; 12839 case CTRL_EXT_LINK_MODE_1000KX: 12840 pcs_autoneg = false; 12841 /* FALLTHROUGH */ 12842 default: 12843 if ((sc->sc_type == WM_T_82575) 12844 || (sc->sc_type == WM_T_82576)) { 12845 if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0) 12846 pcs_autoneg = false; 12847 } 12848 sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD 12849 | CTRL_FRCFDX; 12850 12851 
/* Set speed of 1000/Full if speed/duplex is forced */ 12852 pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL; 12853 } 12854 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl); 12855 12856 pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP | 12857 PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK); 12858 12859 if (pcs_autoneg) { 12860 /* Set PCS register for autoneg */ 12861 pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART; 12862 12863 /* Disable force flow control for autoneg */ 12864 pcs_lctl &= ~PCS_LCTL_FORCE_FC; 12865 12866 /* Configure flow control advertisement for autoneg */ 12867 reg = CSR_READ(sc, WMREG_PCS_ANADV); 12868 reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE); 12869 reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE; 12870 CSR_WRITE(sc, WMREG_PCS_ANADV, reg); 12871 } else 12872 pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC; 12873 12874 CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl); 12875 12876 return 0; 12877 } 12878 12879 static void 12880 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 12881 { 12882 struct wm_softc *sc = ifp->if_softc; 12883 struct mii_data *mii = &sc->sc_mii; 12884 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 12885 uint32_t pcs_adv, pcs_lpab, reg; 12886 12887 ifmr->ifm_status = IFM_AVALID; 12888 ifmr->ifm_active = IFM_ETHER; 12889 12890 /* Check PCS */ 12891 reg = CSR_READ(sc, WMREG_PCS_LSTS); 12892 if ((reg & PCS_LSTS_LINKOK) == 0) { 12893 ifmr->ifm_active |= IFM_NONE; 12894 sc->sc_tbi_linkup = 0; 12895 goto setled; 12896 } 12897 12898 sc->sc_tbi_linkup = 1; 12899 ifmr->ifm_status |= IFM_ACTIVE; 12900 if (sc->sc_type == WM_T_I354) { 12901 uint32_t status; 12902 12903 status = CSR_READ(sc, WMREG_STATUS); 12904 if (((status & STATUS_2P5_SKU) != 0) 12905 && ((status & STATUS_2P5_SKU_OVER) == 0)) { 12906 ifmr->ifm_active |= IFM_2500_KX; 12907 } else 12908 ifmr->ifm_active |= IFM_1000_KX; 12909 } else { 12910 switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) { 12911 case PCS_LSTS_SPEED_10: 12912 ifmr->ifm_active |= IFM_10_T; /* XXX */ 12913 break; 12914 case PCS_LSTS_SPEED_100: 12915 ifmr->ifm_active |= IFM_100_FX; /* XXX */ 12916 break; 12917 case PCS_LSTS_SPEED_1000: 12918 ifmr->ifm_active |= IFM_1000_SX; /* XXX */ 12919 break; 12920 default: 12921 device_printf(sc->sc_dev, "Unknown speed\n"); 12922 ifmr->ifm_active |= IFM_1000_SX; /* XXX */ 12923 break; 12924 } 12925 } 12926 ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active); 12927 if ((reg & PCS_LSTS_FDX) != 0) 12928 ifmr->ifm_active |= IFM_FDX; 12929 else 12930 ifmr->ifm_active |= IFM_HDX; 12931 mii->mii_media_active &= ~IFM_ETH_FMASK; 12932 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { 12933 /* Check flow */ 12934 reg = CSR_READ(sc, WMREG_PCS_LSTS); 12935 if ((reg & PCS_LSTS_AN_COMP) == 0) { 12936 DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n")); 12937 goto setled; 12938 } 12939 pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV); 12940 pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB); 12941 DPRINTF(sc, WM_DEBUG_LINK, 12942 ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab)); 12943 if ((pcs_adv & TXCW_SYM_PAUSE) 12944 && (pcs_lpab & TXCW_SYM_PAUSE)) { 12945 mii->mii_media_active |= IFM_FLOW 12946 | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 12947 } else if (((pcs_adv & TXCW_SYM_PAUSE) == 0) 12948 && (pcs_adv & TXCW_ASYM_PAUSE) 12949 && (pcs_lpab & TXCW_SYM_PAUSE) 12950 && (pcs_lpab & TXCW_ASYM_PAUSE)) { 12951 mii->mii_media_active |= IFM_FLOW 12952 | IFM_ETH_TXPAUSE; 12953 } else if ((pcs_adv & TXCW_SYM_PAUSE) 12954 && (pcs_adv & TXCW_ASYM_PAUSE) 12955 && ((pcs_lpab & TXCW_SYM_PAUSE) == 0) 12956 && (pcs_lpab & TXCW_ASYM_PAUSE)) { 12957 
mii->mii_media_active |= IFM_FLOW 12958 | IFM_ETH_RXPAUSE; 12959 } 12960 } 12961 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) 12962 | (mii->mii_media_active & IFM_ETH_FMASK); 12963 setled: 12964 wm_tbi_serdes_set_linkled(sc); 12965 } 12966 12967 /* 12968 * wm_serdes_tick: 12969 * 12970 * Check the link on serdes devices. 12971 */ 12972 static void 12973 wm_serdes_tick(struct wm_softc *sc) 12974 { 12975 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 12976 struct mii_data *mii = &sc->sc_mii; 12977 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 12978 uint32_t reg; 12979 12980 KASSERT(WM_CORE_LOCKED(sc)); 12981 12982 mii->mii_media_status = IFM_AVALID; 12983 mii->mii_media_active = IFM_ETHER; 12984 12985 /* Check PCS */ 12986 reg = CSR_READ(sc, WMREG_PCS_LSTS); 12987 if ((reg & PCS_LSTS_LINKOK) != 0) { 12988 mii->mii_media_status |= IFM_ACTIVE; 12989 sc->sc_tbi_linkup = 1; 12990 sc->sc_tbi_serdes_ticks = 0; 12991 mii->mii_media_active |= IFM_1000_SX; /* XXX */ 12992 if ((reg & PCS_LSTS_FDX) != 0) 12993 mii->mii_media_active |= IFM_FDX; 12994 else 12995 mii->mii_media_active |= IFM_HDX; 12996 } else { 12997 mii->mii_media_status |= IFM_NONE; 12998 sc->sc_tbi_linkup = 0; 12999 /* If the timer expired, retry autonegotiation */ 13000 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) 13001 && (++sc->sc_tbi_serdes_ticks 13002 >= sc->sc_tbi_serdes_anegticks)) { 13003 DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n", 13004 device_xname(sc->sc_dev), __func__)); 13005 sc->sc_tbi_serdes_ticks = 0; 13006 /* XXX */ 13007 wm_serdes_mediachange(ifp); 13008 } 13009 } 13010 13011 wm_tbi_serdes_set_linkled(sc); 13012 } 13013 13014 /* SFP related */ 13015 13016 static int 13017 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data) 13018 { 13019 uint32_t i2ccmd; 13020 int i; 13021 13022 i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ; 13023 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd); 13024 13025 /* Poll the ready bit */ 13026 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) { 13027 delay(50); 13028 i2ccmd = CSR_READ(sc, WMREG_I2CCMD); 13029 if (i2ccmd & I2CCMD_READY) 13030 break; 13031 } 13032 if ((i2ccmd & I2CCMD_READY) == 0) 13033 return -1; 13034 if ((i2ccmd & I2CCMD_ERROR) != 0) 13035 return -1; 13036 13037 *data = i2ccmd & 0x00ff; 13038 13039 return 0; 13040 } 13041 13042 static uint32_t 13043 wm_sfp_get_media_type(struct wm_softc *sc) 13044 { 13045 uint32_t ctrl_ext; 13046 uint8_t val = 0; 13047 int timeout = 3; 13048 uint32_t mediatype = WM_MEDIATYPE_UNKNOWN; 13049 int rv = -1; 13050 13051 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT); 13052 ctrl_ext &= ~CTRL_EXT_SWDPIN(3); 13053 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA); 13054 CSR_WRITE_FLUSH(sc); 13055 13056 /* Read SFP module data */ 13057 while (timeout) { 13058 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val); 13059 if (rv == 0) 13060 break; 13061 delay(100*1000); /* XXX too big */ 13062 timeout--; 13063 } 13064 if (rv != 0) 13065 goto out; 13066 13067 switch (val) { 13068 case SFF_SFP_ID_SFF: 13069 aprint_normal_dev(sc->sc_dev, 13070 "Module/Connector soldered to board\n"); 13071 break; 13072 case SFF_SFP_ID_SFP: 13073 sc->sc_flags |= WM_F_SFP; 13074 break; 13075 case SFF_SFP_ID_UNKNOWN: 13076 goto out; 13077 default: 13078 break; 13079 } 13080 13081 rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val); 13082 if (rv != 0) 13083 goto out; 13084 13085 sc->sc_sfptype = val; 13086 if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0) 13087 mediatype = WM_MEDIATYPE_SERDES; 13088 else if 
((val & SFF_SFP_ETH_FLAGS_1000T) != 0) { 13089 sc->sc_flags |= WM_F_SGMII; 13090 mediatype = WM_MEDIATYPE_COPPER; 13091 } else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) { 13092 sc->sc_flags |= WM_F_SGMII; 13093 mediatype = WM_MEDIATYPE_SERDES; 13094 } else { 13095 device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n", 13096 __func__, sc->sc_sfptype); 13097 sc->sc_sfptype = 0; /* XXX unknown */ 13098 } 13099 13100 out: 13101 /* Restore I2C interface setting */ 13102 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext); 13103 13104 return mediatype; 13105 } 13106 13107 /* 13108 * NVM related. 13109 * Microwire, SPI (w/wo EERD) and Flash. 13110 */ 13111 13112 /* Both spi and uwire */ 13113 13114 /* 13115 * wm_eeprom_sendbits: 13116 * 13117 * Send a series of bits to the EEPROM. 13118 */ 13119 static void 13120 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits) 13121 { 13122 uint32_t reg; 13123 int x; 13124 13125 reg = CSR_READ(sc, WMREG_EECD); 13126 13127 for (x = nbits; x > 0; x--) { 13128 if (bits & (1U << (x - 1))) 13129 reg |= EECD_DI; 13130 else 13131 reg &= ~EECD_DI; 13132 CSR_WRITE(sc, WMREG_EECD, reg); 13133 CSR_WRITE_FLUSH(sc); 13134 delay(2); 13135 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 13136 CSR_WRITE_FLUSH(sc); 13137 delay(2); 13138 CSR_WRITE(sc, WMREG_EECD, reg); 13139 CSR_WRITE_FLUSH(sc); 13140 delay(2); 13141 } 13142 } 13143 13144 /* 13145 * wm_eeprom_recvbits: 13146 * 13147 * Receive a series of bits from the EEPROM. 13148 */ 13149 static void 13150 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits) 13151 { 13152 uint32_t reg, val; 13153 int x; 13154 13155 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI; 13156 13157 val = 0; 13158 for (x = nbits; x > 0; x--) { 13159 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK); 13160 CSR_WRITE_FLUSH(sc); 13161 delay(2); 13162 if (CSR_READ(sc, WMREG_EECD) & EECD_DO) 13163 val |= (1U << (x - 1)); 13164 CSR_WRITE(sc, WMREG_EECD, reg); 13165 CSR_WRITE_FLUSH(sc); 13166 delay(2); 13167 } 13168 *valp = val; 13169 } 13170 13171 /* Microwire */ 13172 13173 /* 13174 * wm_nvm_read_uwire: 13175 * 13176 * Read a word from the EEPROM using the MicroWire protocol. 13177 */ 13178 static int 13179 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 13180 { 13181 uint32_t reg, val; 13182 int i; 13183 13184 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13185 device_xname(sc->sc_dev), __func__)); 13186 13187 if (sc->nvm.acquire(sc) != 0) 13188 return -1; 13189 13190 for (i = 0; i < wordcnt; i++) { 13191 /* Clear SK and DI. */ 13192 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI); 13193 CSR_WRITE(sc, WMREG_EECD, reg); 13194 13195 /* 13196 * XXX: workaround for a bug in qemu-0.12.x and prior 13197 * and Xen. 13198 * 13199 * We use this workaround only for 82540 because qemu's 13200 * e1000 act as 82540. 13201 */ 13202 if (sc->sc_type == WM_T_82540) { 13203 reg |= EECD_SK; 13204 CSR_WRITE(sc, WMREG_EECD, reg); 13205 reg &= ~EECD_SK; 13206 CSR_WRITE(sc, WMREG_EECD, reg); 13207 CSR_WRITE_FLUSH(sc); 13208 delay(2); 13209 } 13210 /* XXX: end of workaround */ 13211 13212 /* Set CHIP SELECT. */ 13213 reg |= EECD_CS; 13214 CSR_WRITE(sc, WMREG_EECD, reg); 13215 CSR_WRITE_FLUSH(sc); 13216 delay(2); 13217 13218 /* Shift in the READ command. */ 13219 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3); 13220 13221 /* Shift in address. */ 13222 wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits); 13223 13224 /* Shift out the data. 
*/ 13225 wm_eeprom_recvbits(sc, &val, 16); 13226 data[i] = val & 0xffff; 13227 13228 /* Clear CHIP SELECT. */ 13229 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS; 13230 CSR_WRITE(sc, WMREG_EECD, reg); 13231 CSR_WRITE_FLUSH(sc); 13232 delay(2); 13233 } 13234 13235 sc->nvm.release(sc); 13236 return 0; 13237 } 13238 13239 /* SPI */ 13240 13241 /* 13242 * Set SPI and FLASH related information from the EECD register. 13243 * For 82541 and 82547, the word size is taken from EEPROM. 13244 */ 13245 static int 13246 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc) 13247 { 13248 int size; 13249 uint32_t reg; 13250 uint16_t data; 13251 13252 reg = CSR_READ(sc, WMREG_EECD); 13253 sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8; 13254 13255 /* Read the size of NVM from EECD by default */ 13256 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK); 13257 switch (sc->sc_type) { 13258 case WM_T_82541: 13259 case WM_T_82541_2: 13260 case WM_T_82547: 13261 case WM_T_82547_2: 13262 /* Set dummy value to access EEPROM */ 13263 sc->sc_nvm_wordsize = 64; 13264 if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) { 13265 aprint_error_dev(sc->sc_dev, 13266 "%s: failed to read EEPROM size\n", __func__); 13267 } 13268 reg = data; 13269 size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK); 13270 if (size == 0) 13271 size = 6; /* 64 word size */ 13272 else 13273 size += NVM_WORD_SIZE_BASE_SHIFT + 1; 13274 break; 13275 case WM_T_80003: 13276 case WM_T_82571: 13277 case WM_T_82572: 13278 case WM_T_82573: /* SPI case */ 13279 case WM_T_82574: /* SPI case */ 13280 case WM_T_82583: /* SPI case */ 13281 size += NVM_WORD_SIZE_BASE_SHIFT; 13282 if (size > 14) 13283 size = 14; 13284 break; 13285 case WM_T_82575: 13286 case WM_T_82576: 13287 case WM_T_82580: 13288 case WM_T_I350: 13289 case WM_T_I354: 13290 case WM_T_I210: 13291 case WM_T_I211: 13292 size += NVM_WORD_SIZE_BASE_SHIFT; 13293 if (size > 15) 13294 size = 15; 13295 break; 13296 default: 13297 aprint_error_dev(sc->sc_dev, 13298 "%s: unknown device(%d)?\n", __func__, sc->sc_type); 13299 return -1; 13300 break; 13301 } 13302 13303 sc->sc_nvm_wordsize = 1 << size; 13304 13305 return 0; 13306 } 13307 13308 /* 13309 * wm_nvm_ready_spi: 13310 * 13311 * Wait for a SPI EEPROM to be ready for commands. 13312 */ 13313 static int 13314 wm_nvm_ready_spi(struct wm_softc *sc) 13315 { 13316 uint32_t val; 13317 int usec; 13318 13319 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13320 device_xname(sc->sc_dev), __func__)); 13321 13322 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) { 13323 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8); 13324 wm_eeprom_recvbits(sc, &val, 8); 13325 if ((val & SPI_SR_RDY) == 0) 13326 break; 13327 } 13328 if (usec >= SPI_MAX_RETRIES) { 13329 aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n"); 13330 return -1; 13331 } 13332 return 0; 13333 } 13334 13335 /* 13336 * wm_nvm_read_spi: 13337 * 13338 * Read a work from the EEPROM using the SPI protocol. 13339 */ 13340 static int 13341 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 13342 { 13343 uint32_t reg, val; 13344 int i; 13345 uint8_t opc; 13346 int rv = 0; 13347 13348 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13349 device_xname(sc->sc_dev), __func__)); 13350 13351 if (sc->nvm.acquire(sc) != 0) 13352 return -1; 13353 13354 /* Clear SK and CS. 
*/ 13355 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS); 13356 CSR_WRITE(sc, WMREG_EECD, reg); 13357 CSR_WRITE_FLUSH(sc); 13358 delay(2); 13359 13360 if ((rv = wm_nvm_ready_spi(sc)) != 0) 13361 goto out; 13362 13363 /* Toggle CS to flush commands. */ 13364 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS); 13365 CSR_WRITE_FLUSH(sc); 13366 delay(2); 13367 CSR_WRITE(sc, WMREG_EECD, reg); 13368 CSR_WRITE_FLUSH(sc); 13369 delay(2); 13370 13371 opc = SPI_OPC_READ; 13372 if (sc->sc_nvm_addrbits == 8 && word >= 128) 13373 opc |= SPI_OPC_A8; 13374 13375 wm_eeprom_sendbits(sc, opc, 8); 13376 wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits); 13377 13378 for (i = 0; i < wordcnt; i++) { 13379 wm_eeprom_recvbits(sc, &val, 16); 13380 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8); 13381 } 13382 13383 /* Raise CS and clear SK. */ 13384 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS; 13385 CSR_WRITE(sc, WMREG_EECD, reg); 13386 CSR_WRITE_FLUSH(sc); 13387 delay(2); 13388 13389 out: 13390 sc->nvm.release(sc); 13391 return rv; 13392 } 13393 13394 /* Using with EERD */ 13395 13396 static int 13397 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw) 13398 { 13399 uint32_t attempts = 100000; 13400 uint32_t i, reg = 0; 13401 int32_t done = -1; 13402 13403 for (i = 0; i < attempts; i++) { 13404 reg = CSR_READ(sc, rw); 13405 13406 if (reg & EERD_DONE) { 13407 done = 0; 13408 break; 13409 } 13410 delay(5); 13411 } 13412 13413 return done; 13414 } 13415 13416 static int 13417 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data) 13418 { 13419 int i, eerd = 0; 13420 int rv = 0; 13421 13422 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13423 device_xname(sc->sc_dev), __func__)); 13424 13425 if (sc->nvm.acquire(sc) != 0) 13426 return -1; 13427 13428 for (i = 0; i < wordcnt; i++) { 13429 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START; 13430 CSR_WRITE(sc, WMREG_EERD, eerd); 13431 rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD); 13432 if (rv != 0) { 13433 aprint_error_dev(sc->sc_dev, "EERD polling failed: " 13434 "offset=%d. wordcnt=%d\n", offset, wordcnt); 13435 break; 13436 } 13437 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT); 13438 } 13439 13440 sc->nvm.release(sc); 13441 return rv; 13442 } 13443 13444 /* Flash */ 13445 13446 static int 13447 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank) 13448 { 13449 uint32_t eecd; 13450 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1; 13451 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t); 13452 uint32_t nvm_dword = 0; 13453 uint8_t sig_byte = 0; 13454 int rv; 13455 13456 switch (sc->sc_type) { 13457 case WM_T_PCH_SPT: 13458 case WM_T_PCH_CNP: 13459 bank1_offset = sc->sc_ich8_flash_bank_size * 2; 13460 act_offset = ICH_NVM_SIG_WORD * 2; 13461 13462 /* Set bank to 0 in case flash read fails. 
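A failed signature read therefore still leaves a defined value behind for the caller.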
*/ 13463 *bank = 0; 13464 13465 /* Check bank 0 */ 13466 rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword); 13467 if (rv != 0) 13468 return rv; 13469 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8); 13470 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 13471 *bank = 0; 13472 return 0; 13473 } 13474 13475 /* Check bank 1 */ 13476 rv = wm_read_ich8_dword(sc, act_offset + bank1_offset, 13477 &nvm_dword); 13478 sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8); 13479 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 13480 *bank = 1; 13481 return 0; 13482 } 13483 aprint_error_dev(sc->sc_dev, 13484 "%s: no valid NVM bank present (%u)\n", __func__, *bank); 13485 return -1; 13486 case WM_T_ICH8: 13487 case WM_T_ICH9: 13488 eecd = CSR_READ(sc, WMREG_EECD); 13489 if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) { 13490 *bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0; 13491 return 0; 13492 } 13493 /* FALLTHROUGH */ 13494 default: 13495 /* Default to 0 */ 13496 *bank = 0; 13497 13498 /* Check bank 0 */ 13499 wm_read_ich8_byte(sc, act_offset, &sig_byte); 13500 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 13501 *bank = 0; 13502 return 0; 13503 } 13504 13505 /* Check bank 1 */ 13506 wm_read_ich8_byte(sc, act_offset + bank1_offset, 13507 &sig_byte); 13508 if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) { 13509 *bank = 1; 13510 return 0; 13511 } 13512 } 13513 13514 DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n", 13515 device_xname(sc->sc_dev))); 13516 return -1; 13517 } 13518 13519 /****************************************************************************** 13520 * This function does initial flash setup so that a new read/write/erase cycle 13521 * can be started. 13522 * 13523 * sc - The pointer to the hw structure 13524 ****************************************************************************/ 13525 static int32_t 13526 wm_ich8_cycle_init(struct wm_softc *sc) 13527 { 13528 uint16_t hsfsts; 13529 int32_t error = 1; 13530 int32_t i = 0; 13531 13532 if (sc->sc_type >= WM_T_PCH_SPT) 13533 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL; 13534 else 13535 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 13536 13537 /* May be check the Flash Des Valid bit in Hw status */ 13538 if ((hsfsts & HSFSTS_FLDVAL) == 0) 13539 return error; 13540 13541 /* Clear FCERR in Hw status by writing 1 */ 13542 /* Clear DAEL in Hw status by writing a 1 */ 13543 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL; 13544 13545 if (sc->sc_type >= WM_T_PCH_SPT) 13546 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL); 13547 else 13548 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 13549 13550 /* 13551 * Either we should have a hardware SPI cycle in progress bit to check 13552 * against, in order to start a new cycle or FDONE bit should be 13553 * changed in the hardware so that it is 1 after hardware reset, which 13554 * can then be used as an indication whether a cycle is in progress or 13555 * has been completed .. we should also have some software semaphore 13556 * mechanism to guard FDONE or the cycle in progress bit so that two 13557 * threads access to those bits can be sequentiallized or a way so that 13558 * 2 threads don't start the cycle at the same time 13559 */ 13560 13561 if ((hsfsts & HSFSTS_FLINPRO) == 0) { 13562 /* 13563 * There is no cycle running at present, so we can start a 13564 * cycle 13565 */ 13566 13567 /* Begin by setting Flash Cycle Done. 
*/ 13568 hsfsts |= HSFSTS_DONE; 13569 if (sc->sc_type >= WM_T_PCH_SPT) 13570 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, 13571 hsfsts & 0xffffUL); 13572 else 13573 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts); 13574 error = 0; 13575 } else { 13576 /* 13577 * Otherwise poll for sometime so the current cycle has a 13578 * chance to end before giving up. 13579 */ 13580 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) { 13581 if (sc->sc_type >= WM_T_PCH_SPT) 13582 hsfsts = ICH8_FLASH_READ32(sc, 13583 ICH_FLASH_HSFSTS) & 0xffffUL; 13584 else 13585 hsfsts = ICH8_FLASH_READ16(sc, 13586 ICH_FLASH_HSFSTS); 13587 if ((hsfsts & HSFSTS_FLINPRO) == 0) { 13588 error = 0; 13589 break; 13590 } 13591 delay(1); 13592 } 13593 if (error == 0) { 13594 /* 13595 * Successful in waiting for previous cycle to timeout, 13596 * now set the Flash Cycle Done. 13597 */ 13598 hsfsts |= HSFSTS_DONE; 13599 if (sc->sc_type >= WM_T_PCH_SPT) 13600 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, 13601 hsfsts & 0xffffUL); 13602 else 13603 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, 13604 hsfsts); 13605 } 13606 } 13607 return error; 13608 } 13609 13610 /****************************************************************************** 13611 * This function starts a flash cycle and waits for its completion 13612 * 13613 * sc - The pointer to the hw structure 13614 ****************************************************************************/ 13615 static int32_t 13616 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout) 13617 { 13618 uint16_t hsflctl; 13619 uint16_t hsfsts; 13620 int32_t error = 1; 13621 uint32_t i = 0; 13622 13623 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 13624 if (sc->sc_type >= WM_T_PCH_SPT) 13625 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16; 13626 else 13627 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 13628 hsflctl |= HSFCTL_GO; 13629 if (sc->sc_type >= WM_T_PCH_SPT) 13630 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, 13631 (uint32_t)hsflctl << 16); 13632 else 13633 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 13634 13635 /* Wait till FDONE bit is set to 1 */ 13636 do { 13637 if (sc->sc_type >= WM_T_PCH_SPT) 13638 hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) 13639 & 0xffffUL; 13640 else 13641 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS); 13642 if (hsfsts & HSFSTS_DONE) 13643 break; 13644 delay(1); 13645 i++; 13646 } while (i < timeout); 13647 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) 13648 error = 0; 13649 13650 return error; 13651 } 13652 13653 /****************************************************************************** 13654 * Reads a byte or (d)word from the NVM using the ICH8 flash access registers. 13655 * 13656 * sc - The pointer to the hw structure 13657 * index - The index of the byte or word to read. 13658 * size - Size of data to read, 1=byte 2=word, 4=dword 13659 * data - Pointer to the word to store the value read. 
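 * Returns 0 on success, non-zero on failure.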
13660 *****************************************************************************/ 13661 static int32_t 13662 wm_read_ich8_data(struct wm_softc *sc, uint32_t index, 13663 uint32_t size, uint32_t *data) 13664 { 13665 uint16_t hsfsts; 13666 uint16_t hsflctl; 13667 uint32_t flash_linear_address; 13668 uint32_t flash_data = 0; 13669 int32_t error = 1; 13670 int32_t count = 0; 13671 13672 if (size < 1 || size > 4 || data == 0x0 || 13673 index > ICH_FLASH_LINEAR_ADDR_MASK) 13674 return error; 13675 13676 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) + 13677 sc->sc_ich8_flash_base; 13678 13679 do { 13680 delay(1); 13681 /* Steps */ 13682 error = wm_ich8_cycle_init(sc); 13683 if (error) 13684 break; 13685 13686 if (sc->sc_type >= WM_T_PCH_SPT) 13687 hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) 13688 >> 16; 13689 else 13690 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL); 13691 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ 13692 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) 13693 & HSFCTL_BCOUNT_MASK; 13694 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT; 13695 if (sc->sc_type >= WM_T_PCH_SPT) { 13696 /* 13697 * In SPT, This register is in Lan memory space, not 13698 * flash. Therefore, only 32 bit access is supported. 13699 */ 13700 ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, 13701 (uint32_t)hsflctl << 16); 13702 } else 13703 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl); 13704 13705 /* 13706 * Write the last 24 bits of index into Flash Linear address 13707 * field in Flash Address 13708 */ 13709 /* TODO: TBD maybe check the index against the size of flash */ 13710 13711 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address); 13712 13713 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT); 13714 13715 /* 13716 * Check if FCERR is set to 1, if set to 1, clear it and try 13717 * the whole sequence a few more times, else read in (shift in) 13718 * the Flash Data0, the order is least significant byte first 13719 * msb to lsb 13720 */ 13721 if (error == 0) { 13722 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0); 13723 if (size == 1) 13724 *data = (uint8_t)(flash_data & 0x000000FF); 13725 else if (size == 2) 13726 *data = (uint16_t)(flash_data & 0x0000FFFF); 13727 else if (size == 4) 13728 *data = (uint32_t)flash_data; 13729 break; 13730 } else { 13731 /* 13732 * If we've gotten here, then things are probably 13733 * completely hosed, but if the error condition is 13734 * detected, it won't hurt to give it another try... 13735 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 13736 */ 13737 if (sc->sc_type >= WM_T_PCH_SPT) 13738 hsfsts = ICH8_FLASH_READ32(sc, 13739 ICH_FLASH_HSFSTS) & 0xffffUL; 13740 else 13741 hsfsts = ICH8_FLASH_READ16(sc, 13742 ICH_FLASH_HSFSTS); 13743 13744 if (hsfsts & HSFSTS_ERR) { 13745 /* Repeat for some time before giving up. */ 13746 continue; 13747 } else if ((hsfsts & HSFSTS_DONE) == 0) 13748 break; 13749 } 13750 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); 13751 13752 return error; 13753 } 13754 13755 /****************************************************************************** 13756 * Reads a single byte from the NVM using the ICH8 flash access registers. 13757 * 13758 * sc - pointer to wm_hw structure 13759 * index - The index of the byte to read. 13760 * data - Pointer to a byte to store the value read. 
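 * Returns 0 on success; on failure *data is set to 0.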
13761 *****************************************************************************/ 13762 static int32_t 13763 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data) 13764 { 13765 int32_t status; 13766 uint32_t word = 0; 13767 13768 status = wm_read_ich8_data(sc, index, 1, &word); 13769 if (status == 0) 13770 *data = (uint8_t)word; 13771 else 13772 *data = 0; 13773 13774 return status; 13775 } 13776 13777 /****************************************************************************** 13778 * Reads a word from the NVM using the ICH8 flash access registers. 13779 * 13780 * sc - pointer to wm_hw structure 13781 * index - The starting byte index of the word to read. 13782 * data - Pointer to a word to store the value read. 13783 *****************************************************************************/ 13784 static int32_t 13785 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data) 13786 { 13787 int32_t status; 13788 uint32_t word = 0; 13789 13790 status = wm_read_ich8_data(sc, index, 2, &word); 13791 if (status == 0) 13792 *data = (uint16_t)word; 13793 else 13794 *data = 0; 13795 13796 return status; 13797 } 13798 13799 /****************************************************************************** 13800 * Reads a dword from the NVM using the ICH8 flash access registers. 13801 * 13802 * sc - pointer to wm_hw structure 13803 * index - The starting byte index of the word to read. 13804 * data - Pointer to a word to store the value read. 13805 *****************************************************************************/ 13806 static int32_t 13807 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data) 13808 { 13809 int32_t status; 13810 13811 status = wm_read_ich8_data(sc, index, 4, data); 13812 return status; 13813 } 13814 13815 /****************************************************************************** 13816 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 13817 * register. 13818 * 13819 * sc - Struct containing variables accessed by shared code 13820 * offset - offset of word in the EEPROM to read 13821 * data - word read from the EEPROM 13822 * words - number of words to read 13823 *****************************************************************************/ 13824 static int 13825 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data) 13826 { 13827 int32_t rv = 0; 13828 uint32_t flash_bank = 0; 13829 uint32_t act_offset = 0; 13830 uint32_t bank_offset = 0; 13831 uint16_t word = 0; 13832 uint16_t i = 0; 13833 13834 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13835 device_xname(sc->sc_dev), __func__)); 13836 13837 if (sc->nvm.acquire(sc) != 0) 13838 return -1; 13839 13840 /* 13841 * We need to know which is the valid flash bank. In the event 13842 * that we didn't allocate eeprom_shadow_ram, we may not be 13843 * managing flash_bank. So it cannot be trusted and needs 13844 * to be updated with each read. 
13845 */ 13846 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); 13847 if (rv) { 13848 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n", 13849 device_xname(sc->sc_dev))); 13850 flash_bank = 0; 13851 } 13852 13853 /* 13854 * Adjust offset appropriately if we're on bank 1 - adjust for word 13855 * size 13856 */ 13857 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 13858 13859 for (i = 0; i < words; i++) { 13860 /* The NVM part needs a byte offset, hence * 2 */ 13861 act_offset = bank_offset + ((offset + i) * 2); 13862 rv = wm_read_ich8_word(sc, act_offset, &word); 13863 if (rv) { 13864 aprint_error_dev(sc->sc_dev, 13865 "%s: failed to read NVM\n", __func__); 13866 break; 13867 } 13868 data[i] = word; 13869 } 13870 13871 sc->nvm.release(sc); 13872 return rv; 13873 } 13874 13875 /****************************************************************************** 13876 * Reads a 16 bit word or words from the EEPROM using the SPT's flash access 13877 * register. 13878 * 13879 * sc - Struct containing variables accessed by shared code 13880 * offset - offset of word in the EEPROM to read 13881 * data - word read from the EEPROM 13882 * words - number of words to read 13883 *****************************************************************************/ 13884 static int 13885 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data) 13886 { 13887 int32_t rv = 0; 13888 uint32_t flash_bank = 0; 13889 uint32_t act_offset = 0; 13890 uint32_t bank_offset = 0; 13891 uint32_t dword = 0; 13892 uint16_t i = 0; 13893 13894 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13895 device_xname(sc->sc_dev), __func__)); 13896 13897 if (sc->nvm.acquire(sc) != 0) 13898 return -1; 13899 13900 /* 13901 * We need to know which is the valid flash bank. In the event 13902 * that we didn't allocate eeprom_shadow_ram, we may not be 13903 * managing flash_bank. So it cannot be trusted and needs 13904 * to be updated with each read. 13905 */ 13906 rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank); 13907 if (rv) { 13908 DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n", 13909 device_xname(sc->sc_dev))); 13910 flash_bank = 0; 13911 } 13912 13913 /* 13914 * Adjust offset appropriately if we're on bank 1 - adjust for word 13915 * size 13916 */ 13917 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2); 13918 13919 for (i = 0; i < words; i++) { 13920 /* The NVM part needs a byte offset, hence * 2 */ 13921 act_offset = bank_offset + ((offset + i) * 2); 13922 /* but we must read dword aligned, so mask ... */ 13923 rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword); 13924 if (rv) { 13925 aprint_error_dev(sc->sc_dev, 13926 "%s: failed to read NVM\n", __func__); 13927 break; 13928 } 13929 /* ... 
and pick out low or high word */ 13930 if ((act_offset & 0x2) == 0) 13931 data[i] = (uint16_t)(dword & 0xFFFF); 13932 else 13933 data[i] = (uint16_t)((dword >> 16) & 0xFFFF); 13934 } 13935 13936 sc->nvm.release(sc); 13937 return rv; 13938 } 13939 13940 /* iNVM */ 13941 13942 static int 13943 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data) 13944 { 13945 int32_t rv = 0; 13946 uint32_t invm_dword; 13947 uint16_t i; 13948 uint8_t record_type, word_address; 13949 13950 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13951 device_xname(sc->sc_dev), __func__)); 13952 13953 for (i = 0; i < INVM_SIZE; i++) { 13954 invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i)); 13955 /* Get record type */ 13956 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); 13957 if (record_type == INVM_UNINITIALIZED_STRUCTURE) 13958 break; 13959 if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE) 13960 i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; 13961 if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE) 13962 i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; 13963 if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) { 13964 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); 13965 if (word_address == address) { 13966 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 13967 rv = 0; 13968 break; 13969 } 13970 } 13971 } 13972 13973 return rv; 13974 } 13975 13976 static int 13977 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data) 13978 { 13979 int rv = 0; 13980 int i; 13981 13982 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 13983 device_xname(sc->sc_dev), __func__)); 13984 13985 if (sc->nvm.acquire(sc) != 0) 13986 return -1; 13987 13988 for (i = 0; i < words; i++) { 13989 switch (offset + i) { 13990 case NVM_OFF_MACADDR: 13991 case NVM_OFF_MACADDR1: 13992 case NVM_OFF_MACADDR2: 13993 rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]); 13994 if (rv != 0) { 13995 data[i] = 0xffff; 13996 rv = -1; 13997 } 13998 break; 13999 case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */ 14000 rv = wm_nvm_read_word_invm(sc, offset, data); 14001 if (rv != 0) { 14002 *data = INVM_DEFAULT_AL; 14003 rv = 0; 14004 } 14005 break; 14006 case NVM_OFF_CFG2: 14007 rv = wm_nvm_read_word_invm(sc, offset, data); 14008 if (rv != 0) { 14009 *data = NVM_INIT_CTRL_2_DEFAULT_I211; 14010 rv = 0; 14011 } 14012 break; 14013 case NVM_OFF_CFG4: 14014 rv = wm_nvm_read_word_invm(sc, offset, data); 14015 if (rv != 0) { 14016 *data = NVM_INIT_CTRL_4_DEFAULT_I211; 14017 rv = 0; 14018 } 14019 break; 14020 case NVM_OFF_LED_1_CFG: 14021 rv = wm_nvm_read_word_invm(sc, offset, data); 14022 if (rv != 0) { 14023 *data = NVM_LED_1_CFG_DEFAULT_I211; 14024 rv = 0; 14025 } 14026 break; 14027 case NVM_OFF_LED_0_2_CFG: 14028 rv = wm_nvm_read_word_invm(sc, offset, data); 14029 if (rv != 0) { 14030 *data = NVM_LED_0_2_CFG_DEFAULT_I211; 14031 rv = 0; 14032 } 14033 break; 14034 case NVM_OFF_ID_LED_SETTINGS: 14035 rv = wm_nvm_read_word_invm(sc, offset, data); 14036 if (rv != 0) { 14037 *data = ID_LED_RESERVED_FFFF; 14038 rv = 0; 14039 } 14040 break; 14041 default: 14042 DPRINTF(sc, WM_DEBUG_NVM, 14043 ("NVM word 0x%02x is not mapped.\n", offset)); 14044 *data = NVM_RESERVED_WORD; 14045 break; 14046 } 14047 } 14048 14049 sc->nvm.release(sc); 14050 return rv; 14051 } 14052 14053 /* Lock, detecting NVM type, validate checksum, version and read */ 14054 14055 static int 14056 wm_nvm_is_onboard_eeprom(struct wm_softc *sc) 14057 { 14058 uint32_t eecd = 0; 14059 14060 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574 14061 || sc->sc_type == WM_T_82583) { 14062 
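		/*
		 * On 82573/82574/82583 the NVM can be either an on-board
		 * EEPROM or an external flash part; EECD bits 15 and 16
		 * both set indicates flash.
		 */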
eecd = CSR_READ(sc, WMREG_EECD); 14063 14064 /* Isolate bits 15 & 16 */ 14065 eecd = ((eecd >> 15) & 0x03); 14066 14067 /* If both bits are set, device is Flash type */ 14068 if (eecd == 0x03) 14069 return 0; 14070 } 14071 return 1; 14072 } 14073 14074 static int 14075 wm_nvm_flash_presence_i210(struct wm_softc *sc) 14076 { 14077 uint32_t eec; 14078 14079 eec = CSR_READ(sc, WMREG_EEC); 14080 if ((eec & EEC_FLASH_DETECTED) != 0) 14081 return 1; 14082 14083 return 0; 14084 } 14085 14086 /* 14087 * wm_nvm_validate_checksum 14088 * 14089 * The checksum is defined as the sum of the first 64 (16 bit) words. 14090 */ 14091 static int 14092 wm_nvm_validate_checksum(struct wm_softc *sc) 14093 { 14094 uint16_t checksum; 14095 uint16_t eeprom_data; 14096 #ifdef WM_DEBUG 14097 uint16_t csum_wordaddr, valid_checksum; 14098 #endif 14099 int i; 14100 14101 checksum = 0; 14102 14103 /* Don't check for I211 */ 14104 if (sc->sc_type == WM_T_I211) 14105 return 0; 14106 14107 #ifdef WM_DEBUG 14108 if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT) 14109 || (sc->sc_type == WM_T_PCH_CNP)) { 14110 csum_wordaddr = NVM_OFF_COMPAT; 14111 valid_checksum = NVM_COMPAT_VALID_CHECKSUM; 14112 } else { 14113 csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1; 14114 valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM; 14115 } 14116 14117 /* Dump EEPROM image for debug */ 14118 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 14119 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 14120 || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) { 14121 /* XXX PCH_SPT? */ 14122 wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data); 14123 if ((eeprom_data & valid_checksum) == 0) 14124 DPRINTF(sc, WM_DEBUG_NVM, 14125 ("%s: NVM need to be updated (%04x != %04x)\n", 14126 device_xname(sc->sc_dev), eeprom_data, 14127 valid_checksum)); 14128 } 14129 14130 if ((sc->sc_debug & WM_DEBUG_NVM) != 0) { 14131 printf("%s: NVM dump:\n", device_xname(sc->sc_dev)); 14132 for (i = 0; i < NVM_SIZE; i++) { 14133 if (wm_nvm_read(sc, i, 1, &eeprom_data)) 14134 printf("XXXX "); 14135 else 14136 printf("%04hx ", eeprom_data); 14137 if (i % 8 == 7) 14138 printf("\n"); 14139 } 14140 } 14141 14142 #endif /* WM_DEBUG */ 14143 14144 for (i = 0; i < NVM_SIZE; i++) { 14145 if (wm_nvm_read(sc, i, 1, &eeprom_data)) 14146 return 1; 14147 checksum += eeprom_data; 14148 } 14149 14150 if (checksum != (uint16_t) NVM_CHECKSUM) { 14151 #ifdef WM_DEBUG 14152 printf("%s: NVM checksum mismatch (%04x != %04x)\n", 14153 device_xname(sc->sc_dev), checksum, NVM_CHECKSUM); 14154 #endif 14155 } 14156 14157 return 0; 14158 } 14159 14160 static void 14161 wm_nvm_version_invm(struct wm_softc *sc) 14162 { 14163 uint32_t dword; 14164 14165 /* 14166 * Linux's code to decode version is very strange, so we don't 14167 * obey that algorithm and just use word 61 as the document. 14168 * Perhaps it's not perfect though... 
14169 * 14170 * Example: 14171 * 14172 * Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6) 14173 */ 14174 dword = CSR_READ(sc, WM_INVM_DATA_REG(61)); 14175 dword = __SHIFTOUT(dword, INVM_VER_1); 14176 sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR); 14177 sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR); 14178 } 14179 14180 static void 14181 wm_nvm_version(struct wm_softc *sc) 14182 { 14183 uint16_t major, minor, build, patch; 14184 uint16_t uid0, uid1; 14185 uint16_t nvm_data; 14186 uint16_t off; 14187 bool check_version = false; 14188 bool check_optionrom = false; 14189 bool have_build = false; 14190 bool have_uid = true; 14191 14192 /* 14193 * Version format: 14194 * 14195 * XYYZ 14196 * X0YZ 14197 * X0YY 14198 * 14199 * Example: 14200 * 14201 * 82571 0x50a2 5.10.2? (the spec update notes about 5.6-5.10) 14202 * 82571 0x50a6 5.10.6? 14203 * 82572 0x506a 5.6.10? 14204 * 82572EI 0x5069 5.6.9? 14205 * 82574L 0x1080 1.8.0? (the spec update notes about 2.1.4) 14206 * 0x2013 2.1.3? 14207 * 82583 0x10a0 1.10.0? (document says it's default value) 14208 * ICH8+82567 0x0040 0.4.0? 14209 * ICH9+82566 0x1040 1.4.0? 14210 *ICH10+82567 0x0043 0.4.3? 14211 * PCH+82577 0x00c1 0.12.1? 14212 * PCH2+82579 0x00d3 0.13.3? 14213 * 0x00d4 0.13.4? 14214 * LPT+I218 0x0023 0.2.3? 14215 * SPT+I219 0x0084 0.8.4? 14216 * CNP+I219 0x0054 0.5.4? 14217 */ 14218 14219 /* 14220 * XXX 14221 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words. 14222 * I've never seen on real 82574 hardware with such small SPI ROM. 14223 */ 14224 if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1) 14225 || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0)) 14226 have_uid = false; 14227 14228 switch (sc->sc_type) { 14229 case WM_T_82571: 14230 case WM_T_82572: 14231 case WM_T_82574: 14232 case WM_T_82583: 14233 check_version = true; 14234 check_optionrom = true; 14235 have_build = true; 14236 break; 14237 case WM_T_ICH8: 14238 case WM_T_ICH9: 14239 case WM_T_ICH10: 14240 case WM_T_PCH: 14241 case WM_T_PCH2: 14242 case WM_T_PCH_LPT: 14243 case WM_T_PCH_SPT: 14244 case WM_T_PCH_CNP: 14245 check_version = true; 14246 have_build = true; 14247 have_uid = false; 14248 break; 14249 case WM_T_82575: 14250 case WM_T_82576: 14251 case WM_T_82580: 14252 if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID) 14253 check_version = true; 14254 break; 14255 case WM_T_I211: 14256 wm_nvm_version_invm(sc); 14257 have_uid = false; 14258 goto printver; 14259 case WM_T_I210: 14260 if (!wm_nvm_flash_presence_i210(sc)) { 14261 wm_nvm_version_invm(sc); 14262 have_uid = false; 14263 goto printver; 14264 } 14265 /* FALLTHROUGH */ 14266 case WM_T_I350: 14267 case WM_T_I354: 14268 check_version = true; 14269 check_optionrom = true; 14270 break; 14271 default: 14272 return; 14273 } 14274 if (check_version 14275 && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) { 14276 major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; 14277 if (have_build || ((nvm_data & 0x0f00) != 0x0000)) { 14278 minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; 14279 build = nvm_data & NVM_BUILD_MASK; 14280 have_build = true; 14281 } else 14282 minor = nvm_data & 0x00ff; 14283 14284 /* Decimal */ 14285 minor = (minor / 16) * 10 + (minor % 16); 14286 sc->sc_nvm_ver_major = major; 14287 sc->sc_nvm_ver_minor = minor; 14288 14289 printver: 14290 aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major, 14291 sc->sc_nvm_ver_minor); 14292 if (have_build) { 14293 sc->sc_nvm_ver_build = build; 14294 aprint_verbose(".%d", build); 14295 } 14296 } 14297 14298 
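	/*
	 * Worked example, following the table above: for an 82572EI,
	 * nvm_data 0x5069 would decode to major 5, minor 6, build 9,
	 * i.e. version 5.6.9.
	 */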
/* Assume the Option ROM area is at avove NVM_SIZE */ 14299 if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom 14300 && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) { 14301 /* Option ROM Version */ 14302 if ((off != 0x0000) && (off != 0xffff)) { 14303 int rv; 14304 14305 off += NVM_COMBO_VER_OFF; 14306 rv = wm_nvm_read(sc, off + 1, 1, &uid1); 14307 rv |= wm_nvm_read(sc, off, 1, &uid0); 14308 if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff) 14309 && (uid1 != 0) && (uid1 != 0xffff)) { 14310 /* 16bits */ 14311 major = uid0 >> 8; 14312 build = (uid0 << 8) | (uid1 >> 8); 14313 patch = uid1 & 0x00ff; 14314 aprint_verbose(", option ROM Version %d.%d.%d", 14315 major, build, patch); 14316 } 14317 } 14318 } 14319 14320 if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0)) 14321 aprint_verbose(", Image Unique ID %08x", 14322 ((uint32_t)uid1 << 16) | uid0); 14323 } 14324 14325 /* 14326 * wm_nvm_read: 14327 * 14328 * Read data from the serial EEPROM. 14329 */ 14330 static int 14331 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data) 14332 { 14333 int rv; 14334 14335 DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n", 14336 device_xname(sc->sc_dev), __func__)); 14337 14338 if (sc->sc_flags & WM_F_EEPROM_INVALID) 14339 return -1; 14340 14341 rv = sc->nvm.read(sc, word, wordcnt, data); 14342 14343 return rv; 14344 } 14345 14346 /* 14347 * Hardware semaphores. 14348 * Very complexed... 14349 */ 14350 14351 static int 14352 wm_get_null(struct wm_softc *sc) 14353 { 14354 14355 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14356 device_xname(sc->sc_dev), __func__)); 14357 return 0; 14358 } 14359 14360 static void 14361 wm_put_null(struct wm_softc *sc) 14362 { 14363 14364 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14365 device_xname(sc->sc_dev), __func__)); 14366 return; 14367 } 14368 14369 static int 14370 wm_get_eecd(struct wm_softc *sc) 14371 { 14372 uint32_t reg; 14373 int x; 14374 14375 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n", 14376 device_xname(sc->sc_dev), __func__)); 14377 14378 reg = CSR_READ(sc, WMREG_EECD); 14379 14380 /* Request EEPROM access. */ 14381 reg |= EECD_EE_REQ; 14382 CSR_WRITE(sc, WMREG_EECD, reg); 14383 14384 /* ..and wait for it to be granted. 
*/ 14385 for (x = 0; x < 1000; x++) { 14386 reg = CSR_READ(sc, WMREG_EECD); 14387 if (reg & EECD_EE_GNT) 14388 break; 14389 delay(5); 14390 } 14391 if ((reg & EECD_EE_GNT) == 0) { 14392 aprint_error_dev(sc->sc_dev, 14393 "could not acquire EEPROM GNT\n"); 14394 reg &= ~EECD_EE_REQ; 14395 CSR_WRITE(sc, WMREG_EECD, reg); 14396 return -1; 14397 } 14398 14399 return 0; 14400 } 14401 14402 static void 14403 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd) 14404 { 14405 14406 *eecd |= EECD_SK; 14407 CSR_WRITE(sc, WMREG_EECD, *eecd); 14408 CSR_WRITE_FLUSH(sc); 14409 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) 14410 delay(1); 14411 else 14412 delay(50); 14413 } 14414 14415 static void 14416 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd) 14417 { 14418 14419 *eecd &= ~EECD_SK; 14420 CSR_WRITE(sc, WMREG_EECD, *eecd); 14421 CSR_WRITE_FLUSH(sc); 14422 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) 14423 delay(1); 14424 else 14425 delay(50); 14426 } 14427 14428 static void 14429 wm_put_eecd(struct wm_softc *sc) 14430 { 14431 uint32_t reg; 14432 14433 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14434 device_xname(sc->sc_dev), __func__)); 14435 14436 /* Stop nvm */ 14437 reg = CSR_READ(sc, WMREG_EECD); 14438 if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) { 14439 /* Pull CS high */ 14440 reg |= EECD_CS; 14441 wm_nvm_eec_clock_lower(sc, ®); 14442 } else { 14443 /* CS on Microwire is active-high */ 14444 reg &= ~(EECD_CS | EECD_DI); 14445 CSR_WRITE(sc, WMREG_EECD, reg); 14446 wm_nvm_eec_clock_raise(sc, ®); 14447 wm_nvm_eec_clock_lower(sc, ®); 14448 } 14449 14450 reg = CSR_READ(sc, WMREG_EECD); 14451 reg &= ~EECD_EE_REQ; 14452 CSR_WRITE(sc, WMREG_EECD, reg); 14453 14454 return; 14455 } 14456 14457 /* 14458 * Get hardware semaphore. 14459 * Same as e1000_get_hw_semaphore_generic() 14460 */ 14461 static int 14462 wm_get_swsm_semaphore(struct wm_softc *sc) 14463 { 14464 int32_t timeout; 14465 uint32_t swsm; 14466 14467 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14468 device_xname(sc->sc_dev), __func__)); 14469 KASSERT(sc->sc_nvm_wordsize > 0); 14470 14471 retry: 14472 /* Get the SW semaphore. */ 14473 timeout = sc->sc_nvm_wordsize + 1; 14474 while (timeout) { 14475 swsm = CSR_READ(sc, WMREG_SWSM); 14476 14477 if ((swsm & SWSM_SMBI) == 0) 14478 break; 14479 14480 delay(50); 14481 timeout--; 14482 } 14483 14484 if (timeout == 0) { 14485 if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) { 14486 /* 14487 * In rare circumstances, the SW semaphore may already 14488 * be held unintentionally. Clear the semaphore once 14489 * before giving up. 14490 */ 14491 sc->sc_flags &= ~WM_F_WA_I210_CLSEM; 14492 wm_put_swsm_semaphore(sc); 14493 goto retry; 14494 } 14495 aprint_error_dev(sc->sc_dev, 14496 "could not acquire SWSM SMBI\n"); 14497 return 1; 14498 } 14499 14500 /* Get the FW semaphore. */ 14501 timeout = sc->sc_nvm_wordsize + 1; 14502 while (timeout) { 14503 swsm = CSR_READ(sc, WMREG_SWSM); 14504 swsm |= SWSM_SWESMBI; 14505 CSR_WRITE(sc, WMREG_SWSM, swsm); 14506 /* If we managed to set the bit we got the semaphore. */ 14507 swsm = CSR_READ(sc, WMREG_SWSM); 14508 if (swsm & SWSM_SWESMBI) 14509 break; 14510 14511 delay(50); 14512 timeout--; 14513 } 14514 14515 if (timeout == 0) { 14516 aprint_error_dev(sc->sc_dev, 14517 "could not acquire SWSM SWESMBI\n"); 14518 /* Release semaphores */ 14519 wm_put_swsm_semaphore(sc); 14520 return 1; 14521 } 14522 return 0; 14523 } 14524 14525 /* 14526 * Put hardware semaphore. 
14527 * Same as e1000_put_hw_semaphore_generic() 14528 */ 14529 static void 14530 wm_put_swsm_semaphore(struct wm_softc *sc) 14531 { 14532 uint32_t swsm; 14533 14534 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14535 device_xname(sc->sc_dev), __func__)); 14536 14537 swsm = CSR_READ(sc, WMREG_SWSM); 14538 swsm &= ~(SWSM_SMBI | SWSM_SWESMBI); 14539 CSR_WRITE(sc, WMREG_SWSM, swsm); 14540 } 14541 14542 /* 14543 * Get SW/FW semaphore. 14544 * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}(). 14545 */ 14546 static int 14547 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 14548 { 14549 uint32_t swfw_sync; 14550 uint32_t swmask = mask << SWFW_SOFT_SHIFT; 14551 uint32_t fwmask = mask << SWFW_FIRM_SHIFT; 14552 int timeout; 14553 14554 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14555 device_xname(sc->sc_dev), __func__)); 14556 14557 if (sc->sc_type == WM_T_80003) 14558 timeout = 50; 14559 else 14560 timeout = 200; 14561 14562 while (timeout) { 14563 if (wm_get_swsm_semaphore(sc)) { 14564 aprint_error_dev(sc->sc_dev, 14565 "%s: failed to get semaphore\n", 14566 __func__); 14567 return 1; 14568 } 14569 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 14570 if ((swfw_sync & (swmask | fwmask)) == 0) { 14571 swfw_sync |= swmask; 14572 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 14573 wm_put_swsm_semaphore(sc); 14574 return 0; 14575 } 14576 wm_put_swsm_semaphore(sc); 14577 delay(5000); 14578 timeout--; 14579 } 14580 device_printf(sc->sc_dev, 14581 "failed to get swfw semaphore mask 0x%x swfw 0x%x\n", 14582 mask, swfw_sync); 14583 return 1; 14584 } 14585 14586 static void 14587 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask) 14588 { 14589 uint32_t swfw_sync; 14590 14591 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14592 device_xname(sc->sc_dev), __func__)); 14593 14594 while (wm_get_swsm_semaphore(sc) != 0) 14595 continue; 14596 14597 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC); 14598 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT); 14599 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync); 14600 14601 wm_put_swsm_semaphore(sc); 14602 } 14603 14604 static int 14605 wm_get_nvm_80003(struct wm_softc *sc) 14606 { 14607 int rv; 14608 14609 DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n", 14610 device_xname(sc->sc_dev), __func__)); 14611 14612 if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) { 14613 aprint_error_dev(sc->sc_dev, 14614 "%s: failed to get semaphore(SWFW)\n", __func__); 14615 return rv; 14616 } 14617 14618 if (((sc->sc_flags & WM_F_LOCK_EECD) != 0) 14619 && (rv = wm_get_eecd(sc)) != 0) { 14620 aprint_error_dev(sc->sc_dev, 14621 "%s: failed to get semaphore(EECD)\n", __func__); 14622 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 14623 return rv; 14624 } 14625 14626 return 0; 14627 } 14628 14629 static void 14630 wm_put_nvm_80003(struct wm_softc *sc) 14631 { 14632 14633 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14634 device_xname(sc->sc_dev), __func__)); 14635 14636 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) 14637 wm_put_eecd(sc); 14638 wm_put_swfw_semaphore(sc, SWFW_EEP_SM); 14639 } 14640 14641 static int 14642 wm_get_nvm_82571(struct wm_softc *sc) 14643 { 14644 int rv; 14645 14646 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14647 device_xname(sc->sc_dev), __func__)); 14648 14649 if ((rv = wm_get_swsm_semaphore(sc)) != 0) 14650 return rv; 14651 14652 switch (sc->sc_type) { 14653 case WM_T_82573: 14654 break; 14655 default: 14656 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) 14657 rv = wm_get_eecd(sc); 14658 break; 14659 } 14660 14661 if (rv != 0) { 14662 
aprint_error_dev(sc->sc_dev, 14663 "%s: failed to get semaphore\n", 14664 __func__); 14665 wm_put_swsm_semaphore(sc); 14666 } 14667 14668 return rv; 14669 } 14670 14671 static void 14672 wm_put_nvm_82571(struct wm_softc *sc) 14673 { 14674 14675 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14676 device_xname(sc->sc_dev), __func__)); 14677 14678 switch (sc->sc_type) { 14679 case WM_T_82573: 14680 break; 14681 default: 14682 if ((sc->sc_flags & WM_F_LOCK_EECD) != 0) 14683 wm_put_eecd(sc); 14684 break; 14685 } 14686 14687 wm_put_swsm_semaphore(sc); 14688 } 14689 14690 static int 14691 wm_get_phy_82575(struct wm_softc *sc) 14692 { 14693 14694 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14695 device_xname(sc->sc_dev), __func__)); 14696 return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 14697 } 14698 14699 static void 14700 wm_put_phy_82575(struct wm_softc *sc) 14701 { 14702 14703 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14704 device_xname(sc->sc_dev), __func__)); 14705 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]); 14706 } 14707 14708 static int 14709 wm_get_swfwhw_semaphore(struct wm_softc *sc) 14710 { 14711 uint32_t ext_ctrl; 14712 int timeout = 200; 14713 14714 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14715 device_xname(sc->sc_dev), __func__)); 14716 14717 mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 14718 for (timeout = 0; timeout < 200; timeout++) { 14719 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 14720 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; 14721 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 14722 14723 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 14724 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) 14725 return 0; 14726 delay(5000); 14727 } 14728 device_printf(sc->sc_dev, 14729 "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl); 14730 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 14731 return 1; 14732 } 14733 14734 static void 14735 wm_put_swfwhw_semaphore(struct wm_softc *sc) 14736 { 14737 uint32_t ext_ctrl; 14738 14739 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14740 device_xname(sc->sc_dev), __func__)); 14741 14742 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 14743 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 14744 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 14745 14746 mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */ 14747 } 14748 14749 static int 14750 wm_get_swflag_ich8lan(struct wm_softc *sc) 14751 { 14752 uint32_t ext_ctrl; 14753 int timeout; 14754 14755 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14756 device_xname(sc->sc_dev), __func__)); 14757 mutex_enter(sc->sc_ich_phymtx); 14758 for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) { 14759 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 14760 if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0) 14761 break; 14762 delay(1000); 14763 } 14764 if (timeout >= WM_PHY_CFG_TIMEOUT) { 14765 device_printf(sc->sc_dev, 14766 "SW has already locked the resource\n"); 14767 goto out; 14768 } 14769 14770 ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP; 14771 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 14772 for (timeout = 0; timeout < 1000; timeout++) { 14773 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 14774 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) 14775 break; 14776 delay(1000); 14777 } 14778 if (timeout >= 1000) { 14779 device_printf(sc->sc_dev, "failed to acquire semaphore\n"); 14780 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 14781 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 14782 goto out; 14783 } 14784 return 0; 14785 14786 out: 14787 
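	/* Error path: drop the mutex taken above and report failure. */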
mutex_exit(sc->sc_ich_phymtx); 14788 return 1; 14789 } 14790 14791 static void 14792 wm_put_swflag_ich8lan(struct wm_softc *sc) 14793 { 14794 uint32_t ext_ctrl; 14795 14796 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14797 device_xname(sc->sc_dev), __func__)); 14798 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR); 14799 if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) { 14800 ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 14801 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl); 14802 } else { 14803 device_printf(sc->sc_dev, "Semaphore unexpectedly released\n"); 14804 } 14805 14806 mutex_exit(sc->sc_ich_phymtx); 14807 } 14808 14809 static int 14810 wm_get_nvm_ich8lan(struct wm_softc *sc) 14811 { 14812 14813 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14814 device_xname(sc->sc_dev), __func__)); 14815 mutex_enter(sc->sc_ich_nvmmtx); 14816 14817 return 0; 14818 } 14819 14820 static void 14821 wm_put_nvm_ich8lan(struct wm_softc *sc) 14822 { 14823 14824 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14825 device_xname(sc->sc_dev), __func__)); 14826 mutex_exit(sc->sc_ich_nvmmtx); 14827 } 14828 14829 static int 14830 wm_get_hw_semaphore_82573(struct wm_softc *sc) 14831 { 14832 int i = 0; 14833 uint32_t reg; 14834 14835 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14836 device_xname(sc->sc_dev), __func__)); 14837 14838 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 14839 do { 14840 CSR_WRITE(sc, WMREG_EXTCNFCTR, 14841 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP); 14842 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 14843 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0) 14844 break; 14845 delay(2*1000); 14846 i++; 14847 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT); 14848 14849 if (i == WM_MDIO_OWNERSHIP_TIMEOUT) { 14850 wm_put_hw_semaphore_82573(sc); 14851 log(LOG_ERR, "%s: Driver can't access the PHY\n", 14852 device_xname(sc->sc_dev)); 14853 return -1; 14854 } 14855 14856 return 0; 14857 } 14858 14859 static void 14860 wm_put_hw_semaphore_82573(struct wm_softc *sc) 14861 { 14862 uint32_t reg; 14863 14864 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 14865 device_xname(sc->sc_dev), __func__)); 14866 14867 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 14868 reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP; 14869 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 14870 } 14871 14872 /* 14873 * Management mode and power management related subroutines. 14874 * BMC, AMT, suspend/resume and EEE. 
14875 */ 14876 14877 #ifdef WM_WOL 14878 static int 14879 wm_check_mng_mode(struct wm_softc *sc) 14880 { 14881 int rv; 14882 14883 switch (sc->sc_type) { 14884 case WM_T_ICH8: 14885 case WM_T_ICH9: 14886 case WM_T_ICH10: 14887 case WM_T_PCH: 14888 case WM_T_PCH2: 14889 case WM_T_PCH_LPT: 14890 case WM_T_PCH_SPT: 14891 case WM_T_PCH_CNP: 14892 rv = wm_check_mng_mode_ich8lan(sc); 14893 break; 14894 case WM_T_82574: 14895 case WM_T_82583: 14896 rv = wm_check_mng_mode_82574(sc); 14897 break; 14898 case WM_T_82571: 14899 case WM_T_82572: 14900 case WM_T_82573: 14901 case WM_T_80003: 14902 rv = wm_check_mng_mode_generic(sc); 14903 break; 14904 default: 14905 /* Noting to do */ 14906 rv = 0; 14907 break; 14908 } 14909 14910 return rv; 14911 } 14912 14913 static int 14914 wm_check_mng_mode_ich8lan(struct wm_softc *sc) 14915 { 14916 uint32_t fwsm; 14917 14918 fwsm = CSR_READ(sc, WMREG_FWSM); 14919 14920 if (((fwsm & FWSM_FW_VALID) != 0) 14921 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) 14922 return 1; 14923 14924 return 0; 14925 } 14926 14927 static int 14928 wm_check_mng_mode_82574(struct wm_softc *sc) 14929 { 14930 uint16_t data; 14931 14932 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); 14933 14934 if ((data & NVM_CFG2_MNGM_MASK) != 0) 14935 return 1; 14936 14937 return 0; 14938 } 14939 14940 static int 14941 wm_check_mng_mode_generic(struct wm_softc *sc) 14942 { 14943 uint32_t fwsm; 14944 14945 fwsm = CSR_READ(sc, WMREG_FWSM); 14946 14947 if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE) 14948 return 1; 14949 14950 return 0; 14951 } 14952 #endif /* WM_WOL */ 14953 14954 static int 14955 wm_enable_mng_pass_thru(struct wm_softc *sc) 14956 { 14957 uint32_t manc, fwsm, factps; 14958 14959 if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0) 14960 return 0; 14961 14962 manc = CSR_READ(sc, WMREG_MANC); 14963 14964 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n", 14965 device_xname(sc->sc_dev), manc)); 14966 if ((manc & MANC_RECV_TCO_EN) == 0) 14967 return 0; 14968 14969 if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) { 14970 fwsm = CSR_READ(sc, WMREG_FWSM); 14971 factps = CSR_READ(sc, WMREG_FACTPS); 14972 if (((factps & FACTPS_MNGCG) == 0) 14973 && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE)) 14974 return 1; 14975 } else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){ 14976 uint16_t data; 14977 14978 factps = CSR_READ(sc, WMREG_FACTPS); 14979 wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data); 14980 DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n", 14981 device_xname(sc->sc_dev), factps, data)); 14982 if (((factps & FACTPS_MNGCG) == 0) 14983 && ((data & NVM_CFG2_MNGM_MASK) 14984 == (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT))) 14985 return 1; 14986 } else if (((manc & MANC_SMBUS_EN) != 0) 14987 && ((manc & MANC_ASF_EN) == 0)) 14988 return 1; 14989 14990 return 0; 14991 } 14992 14993 static bool 14994 wm_phy_resetisblocked(struct wm_softc *sc) 14995 { 14996 bool blocked = false; 14997 uint32_t reg; 14998 int i = 0; 14999 15000 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15001 device_xname(sc->sc_dev), __func__)); 15002 15003 switch (sc->sc_type) { 15004 case WM_T_ICH8: 15005 case WM_T_ICH9: 15006 case WM_T_ICH10: 15007 case WM_T_PCH: 15008 case WM_T_PCH2: 15009 case WM_T_PCH_LPT: 15010 case WM_T_PCH_SPT: 15011 case WM_T_PCH_CNP: 15012 do { 15013 reg = CSR_READ(sc, WMREG_FWSM); 15014 if ((reg & FWSM_RSPCIPHY) == 0) { 15015 blocked = true; 15016 delay(10*1000); 15017 continue; 15018 } 15019 blocked = false; 15020 } while (blocked && (i++ < 30)); 15021 return blocked; 
15022 break; 15023 case WM_T_82571: 15024 case WM_T_82572: 15025 case WM_T_82573: 15026 case WM_T_82574: 15027 case WM_T_82583: 15028 case WM_T_80003: 15029 reg = CSR_READ(sc, WMREG_MANC); 15030 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0) 15031 return true; 15032 else 15033 return false; 15034 break; 15035 default: 15036 /* No problem */ 15037 break; 15038 } 15039 15040 return false; 15041 } 15042 15043 static void 15044 wm_get_hw_control(struct wm_softc *sc) 15045 { 15046 uint32_t reg; 15047 15048 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15049 device_xname(sc->sc_dev), __func__)); 15050 15051 if (sc->sc_type == WM_T_82573) { 15052 reg = CSR_READ(sc, WMREG_SWSM); 15053 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD); 15054 } else if (sc->sc_type >= WM_T_82571) { 15055 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15056 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD); 15057 } 15058 } 15059 15060 static void 15061 wm_release_hw_control(struct wm_softc *sc) 15062 { 15063 uint32_t reg; 15064 15065 DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n", 15066 device_xname(sc->sc_dev), __func__)); 15067 15068 if (sc->sc_type == WM_T_82573) { 15069 reg = CSR_READ(sc, WMREG_SWSM); 15070 CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD); 15071 } else if (sc->sc_type >= WM_T_82571) { 15072 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15073 CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD); 15074 } 15075 } 15076 15077 static void 15078 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate) 15079 { 15080 uint32_t reg; 15081 15082 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15083 device_xname(sc->sc_dev), __func__)); 15084 15085 if (sc->sc_type < WM_T_PCH2) 15086 return; 15087 15088 reg = CSR_READ(sc, WMREG_EXTCNFCTR); 15089 15090 if (gate) 15091 reg |= EXTCNFCTR_GATE_PHY_CFG; 15092 else 15093 reg &= ~EXTCNFCTR_GATE_PHY_CFG; 15094 15095 CSR_WRITE(sc, WMREG_EXTCNFCTR, reg); 15096 } 15097 15098 static int 15099 wm_init_phy_workarounds_pchlan(struct wm_softc *sc) 15100 { 15101 uint32_t fwsm, reg; 15102 int rv = 0; 15103 15104 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15105 device_xname(sc->sc_dev), __func__)); 15106 15107 /* Gate automatic PHY configuration by hardware on non-managed 82579 */ 15108 wm_gate_hw_phy_config_ich8lan(sc, true); 15109 15110 /* Disable ULP */ 15111 wm_ulp_disable(sc); 15112 15113 /* Acquire PHY semaphore */ 15114 rv = sc->phy.acquire(sc); 15115 if (rv != 0) { 15116 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n", 15117 device_xname(sc->sc_dev), __func__)); 15118 return -1; 15119 } 15120 15121 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is 15122 * inaccessible and resetting the PHY is not blocked, toggle the 15123 * LANPHYPC Value bit to force the interconnect to PCIe mode. 15124 */ 15125 fwsm = CSR_READ(sc, WMREG_FWSM); 15126 switch (sc->sc_type) { 15127 case WM_T_PCH_LPT: 15128 case WM_T_PCH_SPT: 15129 case WM_T_PCH_CNP: 15130 if (wm_phy_is_accessible_pchlan(sc)) 15131 break; 15132 15133 /* Before toggling LANPHYPC, see if PHY is accessible by 15134 * forcing MAC to SMBus mode first. 15135 */ 15136 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15137 reg |= CTRL_EXT_FORCE_SMBUS; 15138 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 15139 #if 0 15140 /* XXX Isn't this required??? */ 15141 CSR_WRITE_FLUSH(sc); 15142 #endif 15143 /* Wait 50 milliseconds for MAC to finish any retries 15144 * that it might be trying to perform from previous 15145 * attempts to acknowledge any phy read requests. 
15146 */ 15147 delay(50 * 1000); 15148 /* FALLTHROUGH */ 15149 case WM_T_PCH2: 15150 if (wm_phy_is_accessible_pchlan(sc) == true) 15151 break; 15152 /* FALLTHROUGH */ 15153 case WM_T_PCH: 15154 if (sc->sc_type == WM_T_PCH) 15155 if ((fwsm & FWSM_FW_VALID) != 0) 15156 break; 15157 15158 if (wm_phy_resetisblocked(sc) == true) { 15159 device_printf(sc->sc_dev, "XXX reset is blocked(3)\n"); 15160 break; 15161 } 15162 15163 /* Toggle LANPHYPC Value bit */ 15164 wm_toggle_lanphypc_pch_lpt(sc); 15165 15166 if (sc->sc_type >= WM_T_PCH_LPT) { 15167 if (wm_phy_is_accessible_pchlan(sc) == true) 15168 break; 15169 15170 /* Toggling LANPHYPC brings the PHY out of SMBus mode 15171 * so ensure that the MAC is also out of SMBus mode 15172 */ 15173 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15174 reg &= ~CTRL_EXT_FORCE_SMBUS; 15175 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 15176 15177 if (wm_phy_is_accessible_pchlan(sc) == true) 15178 break; 15179 rv = -1; 15180 } 15181 break; 15182 default: 15183 break; 15184 } 15185 15186 /* Release semaphore */ 15187 sc->phy.release(sc); 15188 15189 if (rv == 0) { 15190 /* Check to see if able to reset PHY. Print error if not */ 15191 if (wm_phy_resetisblocked(sc)) { 15192 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n"); 15193 goto out; 15194 } 15195 15196 /* Reset the PHY before any access to it. Doing so, ensures 15197 * that the PHY is in a known good state before we read/write 15198 * PHY registers. The generic reset is sufficient here, 15199 * because we haven't determined the PHY type yet. 15200 */ 15201 if (wm_reset_phy(sc) != 0) 15202 goto out; 15203 15204 /* On a successful reset, possibly need to wait for the PHY 15205 * to quiesce to an accessible state before returning control 15206 * to the calling function. If the PHY does not quiesce, then 15207 * return E1000E_BLK_PHY_RESET, as this is the condition that 15208 * the PHY is in. 
15209 */ 15210 if (wm_phy_resetisblocked(sc)) 15211 device_printf(sc->sc_dev, "XXX reset is blocked(4)\n"); 15212 } 15213 15214 out: 15215 /* Ungate automatic PHY configuration on non-managed 82579 */ 15216 if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) { 15217 delay(10*1000); 15218 wm_gate_hw_phy_config_ich8lan(sc, false); 15219 } 15220 15221 return 0; 15222 } 15223 15224 static void 15225 wm_init_manageability(struct wm_softc *sc) 15226 { 15227 15228 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15229 device_xname(sc->sc_dev), __func__)); 15230 if (sc->sc_flags & WM_F_HAS_MANAGE) { 15231 uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H); 15232 uint32_t manc = CSR_READ(sc, WMREG_MANC); 15233 15234 /* Disable hardware interception of ARP */ 15235 manc &= ~MANC_ARP_EN; 15236 15237 /* Enable receiving management packets to the host */ 15238 if (sc->sc_type >= WM_T_82571) { 15239 manc |= MANC_EN_MNG2HOST; 15240 manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624; 15241 CSR_WRITE(sc, WMREG_MANC2H, manc2h); 15242 } 15243 15244 CSR_WRITE(sc, WMREG_MANC, manc); 15245 } 15246 } 15247 15248 static void 15249 wm_release_manageability(struct wm_softc *sc) 15250 { 15251 15252 if (sc->sc_flags & WM_F_HAS_MANAGE) { 15253 uint32_t manc = CSR_READ(sc, WMREG_MANC); 15254 15255 manc |= MANC_ARP_EN; 15256 if (sc->sc_type >= WM_T_82571) 15257 manc &= ~MANC_EN_MNG2HOST; 15258 15259 CSR_WRITE(sc, WMREG_MANC, manc); 15260 } 15261 } 15262 15263 static void 15264 wm_get_wakeup(struct wm_softc *sc) 15265 { 15266 15267 /* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */ 15268 switch (sc->sc_type) { 15269 case WM_T_82573: 15270 case WM_T_82583: 15271 sc->sc_flags |= WM_F_HAS_AMT; 15272 /* FALLTHROUGH */ 15273 case WM_T_80003: 15274 case WM_T_82575: 15275 case WM_T_82576: 15276 case WM_T_82580: 15277 case WM_T_I350: 15278 case WM_T_I354: 15279 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0) 15280 sc->sc_flags |= WM_F_ARC_SUBSYS_VALID; 15281 /* FALLTHROUGH */ 15282 case WM_T_82541: 15283 case WM_T_82541_2: 15284 case WM_T_82547: 15285 case WM_T_82547_2: 15286 case WM_T_82571: 15287 case WM_T_82572: 15288 case WM_T_82574: 15289 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 15290 break; 15291 case WM_T_ICH8: 15292 case WM_T_ICH9: 15293 case WM_T_ICH10: 15294 case WM_T_PCH: 15295 case WM_T_PCH2: 15296 case WM_T_PCH_LPT: 15297 case WM_T_PCH_SPT: 15298 case WM_T_PCH_CNP: 15299 sc->sc_flags |= WM_F_HAS_AMT; 15300 sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES; 15301 break; 15302 default: 15303 break; 15304 } 15305 15306 /* 1: HAS_MANAGE */ 15307 if (wm_enable_mng_pass_thru(sc) != 0) 15308 sc->sc_flags |= WM_F_HAS_MANAGE; 15309 15310 /* 15311 * Note that the WOL flags is set after the resetting of the eeprom 15312 * stuff 15313 */ 15314 } 15315 15316 /* 15317 * Unconfigure Ultra Low Power mode. 15318 * Only for I217 and newer (see below). 
15319 */ 15320 static int 15321 wm_ulp_disable(struct wm_softc *sc) 15322 { 15323 uint32_t reg; 15324 uint16_t phyreg; 15325 int i = 0, rv = 0; 15326 15327 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15328 device_xname(sc->sc_dev), __func__)); 15329 /* Exclude old devices */ 15330 if ((sc->sc_type < WM_T_PCH_LPT) 15331 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM) 15332 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V) 15333 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2) 15334 || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2)) 15335 return 0; 15336 15337 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) { 15338 /* Request ME un-configure ULP mode in the PHY */ 15339 reg = CSR_READ(sc, WMREG_H2ME); 15340 reg &= ~H2ME_ULP; 15341 reg |= H2ME_ENFORCE_SETTINGS; 15342 CSR_WRITE(sc, WMREG_H2ME, reg); 15343 15344 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */ 15345 while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) { 15346 if (i++ == 30) { 15347 device_printf(sc->sc_dev, "%s timed out\n", 15348 __func__); 15349 return -1; 15350 } 15351 delay(10 * 1000); 15352 } 15353 reg = CSR_READ(sc, WMREG_H2ME); 15354 reg &= ~H2ME_ENFORCE_SETTINGS; 15355 CSR_WRITE(sc, WMREG_H2ME, reg); 15356 15357 return 0; 15358 } 15359 15360 /* Acquire semaphore */ 15361 rv = sc->phy.acquire(sc); 15362 if (rv != 0) { 15363 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n", 15364 device_xname(sc->sc_dev), __func__)); 15365 return -1; 15366 } 15367 15368 /* Toggle LANPHYPC */ 15369 wm_toggle_lanphypc_pch_lpt(sc); 15370 15371 /* Unforce SMBus mode in PHY */ 15372 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg); 15373 if (rv != 0) { 15374 uint32_t reg2; 15375 15376 aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n", 15377 __func__); 15378 reg2 = CSR_READ(sc, WMREG_CTRL_EXT); 15379 reg2 |= CTRL_EXT_FORCE_SMBUS; 15380 CSR_WRITE(sc, WMREG_CTRL_EXT, reg2); 15381 delay(50 * 1000); 15382 15383 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, 15384 &phyreg); 15385 if (rv != 0) 15386 goto release; 15387 } 15388 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS; 15389 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg); 15390 15391 /* Unforce SMBus mode in MAC */ 15392 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15393 reg &= ~CTRL_EXT_FORCE_SMBUS; 15394 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 15395 15396 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg); 15397 if (rv != 0) 15398 goto release; 15399 phyreg |= HV_PM_CTRL_K1_ENA; 15400 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg); 15401 15402 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, 15403 &phyreg); 15404 if (rv != 0) 15405 goto release; 15406 phyreg &= ~(I218_ULP_CONFIG1_IND 15407 | I218_ULP_CONFIG1_STICKY_ULP 15408 | I218_ULP_CONFIG1_RESET_TO_SMBUS 15409 | I218_ULP_CONFIG1_WOL_HOST 15410 | I218_ULP_CONFIG1_INBAND_EXIT 15411 | I218_ULP_CONFIG1_EN_ULP_LANPHYPC 15412 | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 15413 | I218_ULP_CONFIG1_DIS_SMB_PERST); 15414 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg); 15415 phyreg |= I218_ULP_CONFIG1_START; 15416 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg); 15417 15418 reg = CSR_READ(sc, WMREG_FEXTNVM7); 15419 reg &= ~FEXTNVM7_DIS_SMB_PERST; 15420 CSR_WRITE(sc, WMREG_FEXTNVM7, reg); 15421 15422 release: 15423 /* Release semaphore */ 15424 sc->phy.release(sc); 15425 wm_gmii_reset(sc); 15426 delay(50 * 1000); 15427 15428 return rv; 15429 } 15430 15431 /* WOL in the newer chipset interfaces (pchlan) */ 15432 static 
int 15433 wm_enable_phy_wakeup(struct wm_softc *sc) 15434 { 15435 device_t dev = sc->sc_dev; 15436 uint32_t mreg, moff; 15437 uint16_t wuce, wuc, wufc, preg; 15438 int i, rv; 15439 15440 KASSERT(sc->sc_type >= WM_T_PCH); 15441 15442 /* Copy MAC RARs to PHY RARs */ 15443 wm_copy_rx_addrs_to_phy_ich8lan(sc); 15444 15445 /* Activate PHY wakeup */ 15446 rv = sc->phy.acquire(sc); 15447 if (rv != 0) { 15448 device_printf(dev, "%s: failed to acquire semaphore\n", 15449 __func__); 15450 return rv; 15451 } 15452 15453 /* 15454 * Enable access to PHY wakeup registers. 15455 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE. 15456 */ 15457 rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce); 15458 if (rv != 0) { 15459 device_printf(dev, 15460 "%s: Could not enable PHY wakeup reg access\n", __func__); 15461 goto release; 15462 } 15463 15464 /* Copy MAC MTA to PHY MTA */ 15465 for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) { 15466 uint16_t lo, hi; 15467 15468 mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4)); 15469 lo = (uint16_t)(mreg & 0xffff); 15470 hi = (uint16_t)((mreg >> 16) & 0xffff); 15471 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true); 15472 wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true); 15473 } 15474 15475 /* Configure PHY Rx Control register */ 15476 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true); 15477 mreg = CSR_READ(sc, WMREG_RCTL); 15478 if (mreg & RCTL_UPE) 15479 preg |= BM_RCTL_UPE; 15480 if (mreg & RCTL_MPE) 15481 preg |= BM_RCTL_MPE; 15482 preg &= ~(BM_RCTL_MO_MASK); 15483 moff = __SHIFTOUT(mreg, RCTL_MO); 15484 if (moff != 0) 15485 preg |= moff << BM_RCTL_MO_SHIFT; 15486 if (mreg & RCTL_BAM) 15487 preg |= BM_RCTL_BAM; 15488 if (mreg & RCTL_PMCF) 15489 preg |= BM_RCTL_PMCF; 15490 mreg = CSR_READ(sc, WMREG_CTRL); 15491 if (mreg & CTRL_RFCE) 15492 preg |= BM_RCTL_RFCE; 15493 wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true); 15494 15495 wuc = WUC_APME | WUC_PME_EN; 15496 wufc = WUFC_MAG; 15497 /* Enable PHY wakeup in MAC register */ 15498 CSR_WRITE(sc, WMREG_WUC, 15499 WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc); 15500 CSR_WRITE(sc, WMREG_WUFC, wufc); 15501 15502 /* Configure and enable PHY wakeup in PHY registers */ 15503 wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true); 15504 wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true); 15505 15506 wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; 15507 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); 15508 15509 release: 15510 sc->phy.release(sc); 15511 15512 return 0; 15513 } 15514 15515 /* Power down workaround on D3 */ 15516 static void 15517 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc) 15518 { 15519 uint32_t reg; 15520 uint16_t phyreg; 15521 int i; 15522 15523 for (i = 0; i < 2; i++) { 15524 /* Disable link */ 15525 reg = CSR_READ(sc, WMREG_PHY_CTRL); 15526 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; 15527 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 15528 15529 /* 15530 * Call gig speed drop workaround on Gig disable before 15531 * accessing any PHY registers 15532 */ 15533 if (sc->sc_type == WM_T_ICH8) 15534 wm_gig_downshift_workaround_ich8lan(sc); 15535 15536 /* Write VR power-down enable */ 15537 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg); 15538 phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 15539 phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN; 15540 sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg); 15541 15542 /* Read it back and test */ 15543 sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg); 15544 phyreg &= 
IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; 15545 if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0)) 15546 break; 15547 15548 /* Issue PHY reset and repeat at most one more time */ 15549 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET); 15550 } 15551 } 15552 15553 /* 15554 * wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx 15555 * @sc: pointer to the HW structure 15556 * 15557 * During S0 to Sx transition, it is possible the link remains at gig 15558 * instead of negotiating to a lower speed. Before going to Sx, set 15559 * 'Gig Disable' to force link speed negotiation to a lower speed based on 15560 * the LPLU setting in the NVM or custom setting. For PCH and newer parts, 15561 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also 15562 * needs to be written. 15563 * Parts that support (and are linked to a partner which support) EEE in 15564 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power 15565 * than 10Mbps w/o EEE. 15566 */ 15567 static void 15568 wm_suspend_workarounds_ich8lan(struct wm_softc *sc) 15569 { 15570 device_t dev = sc->sc_dev; 15571 struct ethercom *ec = &sc->sc_ethercom; 15572 uint32_t phy_ctrl; 15573 int rv; 15574 15575 phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL); 15576 phy_ctrl |= PHY_CTRL_GBE_DIS; 15577 15578 KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP)); 15579 15580 if (sc->sc_phytype == WMPHY_I217) { 15581 uint16_t devid = sc->sc_pcidevid; 15582 15583 if ((devid == PCI_PRODUCT_INTEL_I218_LM) || 15584 (devid == PCI_PRODUCT_INTEL_I218_V) || 15585 (devid == PCI_PRODUCT_INTEL_I218_LM3) || 15586 (devid == PCI_PRODUCT_INTEL_I218_V3) || 15587 (sc->sc_type >= WM_T_PCH_SPT)) 15588 CSR_WRITE(sc, WMREG_FEXTNVM6, 15589 CSR_READ(sc, WMREG_FEXTNVM6) 15590 & ~FEXTNVM6_REQ_PLL_CLK); 15591 15592 if (sc->phy.acquire(sc) != 0) 15593 goto out; 15594 15595 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) { 15596 uint16_t eee_advert; 15597 15598 rv = wm_read_emi_reg_locked(dev, 15599 I217_EEE_ADVERTISEMENT, &eee_advert); 15600 if (rv) 15601 goto release; 15602 15603 /* 15604 * Disable LPLU if both link partners support 100BaseT 15605 * EEE and 100Full is advertised on both ends of the 15606 * link, and enable Auto Enable LPI since there will 15607 * be no driver to enable LPI while in Sx. 15608 */ 15609 if ((eee_advert & AN_EEEADVERT_100_TX) && 15610 (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) { 15611 uint16_t anar, phy_reg; 15612 15613 sc->phy.readreg_locked(dev, 2, MII_ANAR, 15614 &anar); 15615 if (anar & ANAR_TX_FD) { 15616 phy_ctrl &= ~(PHY_CTRL_D0A_LPLU | 15617 PHY_CTRL_NOND0A_LPLU); 15618 15619 /* Set Auto Enable LPI after link up */ 15620 sc->phy.readreg_locked(dev, 2, 15621 I217_LPI_GPIO_CTRL, &phy_reg); 15622 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; 15623 sc->phy.writereg_locked(dev, 2, 15624 I217_LPI_GPIO_CTRL, phy_reg); 15625 } 15626 } 15627 } 15628 15629 /* 15630 * For i217 Intel Rapid Start Technology support, 15631 * when the system is going into Sx and no manageability engine 15632 * is present, the driver must configure proxy to reset only on 15633 * power good. LPI (Low Power Idle) state must also reset only 15634 * on power good, as well as the MTA (Multicast table array). 15635 * The SMBus release must also be disabled on LCD reset. 
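	 *
	 * XXX The proxy/LPI/SMBus-release programming described above is
	 * not implemented below; only the MTA reset comment follows.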
15636 */ 15637 15638 /* 15639 * Enable MTA to reset for Intel Rapid Start Technology 15640 * Support 15641 */ 15642 15643 release: 15644 sc->phy.release(sc); 15645 } 15646 out: 15647 CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl); 15648 15649 if (sc->sc_type == WM_T_ICH8) 15650 wm_gig_downshift_workaround_ich8lan(sc); 15651 15652 if (sc->sc_type >= WM_T_PCH) { 15653 wm_oem_bits_config_ich8lan(sc, false); 15654 15655 /* Reset PHY to activate OEM bits on 82577/8 */ 15656 if (sc->sc_type == WM_T_PCH) 15657 wm_reset_phy(sc); 15658 15659 if (sc->phy.acquire(sc) != 0) 15660 return; 15661 wm_write_smbus_addr(sc); 15662 sc->phy.release(sc); 15663 } 15664 } 15665 15666 /* 15667 * wm_resume_workarounds_pchlan - workarounds needed during Sx->S0 15668 * @sc: pointer to the HW structure 15669 * 15670 * During Sx to S0 transitions on non-managed devices or managed devices 15671 * on which PHY resets are not blocked, if the PHY registers cannot be 15672 * accessed properly by the s/w toggle the LANPHYPC value to power cycle 15673 * the PHY. 15674 * On i217, setup Intel Rapid Start Technology. 15675 */ 15676 static int 15677 wm_resume_workarounds_pchlan(struct wm_softc *sc) 15678 { 15679 device_t dev = sc->sc_dev; 15680 int rv; 15681 15682 if (sc->sc_type < WM_T_PCH2) 15683 return 0; 15684 15685 rv = wm_init_phy_workarounds_pchlan(sc); 15686 if (rv != 0) 15687 return -1; 15688 15689 /* For i217 Intel Rapid Start Technology support when the system 15690 * is transitioning from Sx and no manageability engine is present 15691 * configure SMBus to restore on reset, disable proxy, and enable 15692 * the reset on MTA (Multicast table array). 15693 */ 15694 if (sc->sc_phytype == WMPHY_I217) { 15695 uint16_t phy_reg; 15696 15697 if (sc->phy.acquire(sc) != 0) 15698 return -1; 15699 15700 /* Clear Auto Enable LPI after link up */ 15701 sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg); 15702 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; 15703 sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg); 15704 15705 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { 15706 /* Restore clear on SMB if no manageability engine 15707 * is present 15708 */ 15709 rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR, 15710 &phy_reg); 15711 if (rv != 0) 15712 goto release; 15713 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; 15714 sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg); 15715 15716 /* Disable Proxy */ 15717 sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0); 15718 } 15719 /* Enable reset on MTA */ 15720 sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg); 15721 if (rv != 0) 15722 goto release; 15723 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; 15724 sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg); 15725 15726 release: 15727 sc->phy.release(sc); 15728 return rv; 15729 } 15730 15731 return 0; 15732 } 15733 15734 static void 15735 wm_enable_wakeup(struct wm_softc *sc) 15736 { 15737 uint32_t reg, pmreg; 15738 pcireg_t pmode; 15739 int rv = 0; 15740 15741 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15742 device_xname(sc->sc_dev), __func__)); 15743 15744 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, 15745 &pmreg, NULL) == 0) 15746 return; 15747 15748 if ((sc->sc_flags & WM_F_WOL) == 0) 15749 goto pme; 15750 15751 /* Advertise the wakeup capability */ 15752 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2) 15753 | CTRL_SWDPIN(3)); 15754 15755 /* Keep the laser running on fiber adapters */ 15756 if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER) 15757 || (sc->sc_mediatype == 
WM_MEDIATYPE_SERDES)) { 15758 reg = CSR_READ(sc, WMREG_CTRL_EXT); 15759 reg |= CTRL_EXT_SWDPIN(3); 15760 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 15761 } 15762 15763 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) || 15764 (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) || 15765 (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) || 15766 (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) 15767 wm_suspend_workarounds_ich8lan(sc); 15768 15769 #if 0 /* For the multicast packet */ 15770 reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG; 15771 reg |= WUFC_MC; 15772 CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE); 15773 #endif 15774 15775 if (sc->sc_type >= WM_T_PCH) { 15776 rv = wm_enable_phy_wakeup(sc); 15777 if (rv != 0) 15778 goto pme; 15779 } else { 15780 /* Enable wakeup by the MAC */ 15781 CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN); 15782 CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG); 15783 } 15784 15785 if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) 15786 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) 15787 || (sc->sc_type == WM_T_PCH2)) 15788 && (sc->sc_phytype == WMPHY_IGP_3)) 15789 wm_igp3_phy_powerdown_workaround_ich8lan(sc); 15790 15791 pme: 15792 /* Request PME */ 15793 pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR); 15794 pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */ 15795 if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) { 15796 /* For WOL */ 15797 pmode |= PCI_PMCSR_PME_EN; 15798 } else { 15799 /* Disable WOL */ 15800 pmode &= ~PCI_PMCSR_PME_EN; 15801 } 15802 pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode); 15803 } 15804 15805 /* Disable ASPM L0s and/or L1 for workaround */ 15806 static void 15807 wm_disable_aspm(struct wm_softc *sc) 15808 { 15809 pcireg_t reg, mask = 0; 15810 unsigned const char *str = ""; 15811 15812 /* 15813 * Only for PCIe device which has PCIe capability in the PCI config 15814 * space. 15815 */ 15816 if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0)) 15817 return; 15818 15819 switch (sc->sc_type) { 15820 case WM_T_82571: 15821 case WM_T_82572: 15822 /* 15823 * 8257[12] Errata 13: Device Does Not Support PCIe Active 15824 * State Power management L1 State (ASPM L1). 15825 */ 15826 mask = PCIE_LCSR_ASPM_L1; 15827 str = "L1 is"; 15828 break; 15829 case WM_T_82573: 15830 case WM_T_82574: 15831 case WM_T_82583: 15832 /* 15833 * The 82573 disappears when PCIe ASPM L0s is enabled. 15834 * 15835 * The 82574 and 82583 does not support PCIe ASPM L0s with 15836 * some chipset. The document of 82574 and 82583 says that 15837 * disabling L0s with some specific chipset is sufficient, 15838 * but we follow as of the Intel em driver does. 15839 * 15840 * References: 15841 * Errata 8 of the Specification Update of i82573. 15842 * Errata 20 of the Specification Update of i82574. 15843 * Errata 9 of the Specification Update of i82583. 
15844 */ 15845 mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S; 15846 str = "L0s and L1 are"; 15847 break; 15848 default: 15849 return; 15850 } 15851 15852 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 15853 sc->sc_pcixe_capoff + PCIE_LCSR); 15854 reg &= ~mask; 15855 pci_conf_write(sc->sc_pc, sc->sc_pcitag, 15856 sc->sc_pcixe_capoff + PCIE_LCSR, reg); 15857 15858 /* Print only in wm_attach() */ 15859 if ((sc->sc_flags & WM_F_ATTACHED) == 0) 15860 aprint_verbose_dev(sc->sc_dev, 15861 "ASPM %s disabled to workaround the errata.\n", str); 15862 } 15863 15864 /* LPLU */ 15865 15866 static void 15867 wm_lplu_d0_disable(struct wm_softc *sc) 15868 { 15869 struct mii_data *mii = &sc->sc_mii; 15870 uint32_t reg; 15871 uint16_t phyval; 15872 15873 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 15874 device_xname(sc->sc_dev), __func__)); 15875 15876 if (sc->sc_phytype == WMPHY_IFE) 15877 return; 15878 15879 switch (sc->sc_type) { 15880 case WM_T_82571: 15881 case WM_T_82572: 15882 case WM_T_82573: 15883 case WM_T_82575: 15884 case WM_T_82576: 15885 mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval); 15886 phyval &= ~PMR_D0_LPLU; 15887 mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval); 15888 break; 15889 case WM_T_82580: 15890 case WM_T_I350: 15891 case WM_T_I210: 15892 case WM_T_I211: 15893 reg = CSR_READ(sc, WMREG_PHPM); 15894 reg &= ~PHPM_D0A_LPLU; 15895 CSR_WRITE(sc, WMREG_PHPM, reg); 15896 break; 15897 case WM_T_82574: 15898 case WM_T_82583: 15899 case WM_T_ICH8: 15900 case WM_T_ICH9: 15901 case WM_T_ICH10: 15902 reg = CSR_READ(sc, WMREG_PHY_CTRL); 15903 reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU); 15904 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 15905 CSR_WRITE_FLUSH(sc); 15906 break; 15907 case WM_T_PCH: 15908 case WM_T_PCH2: 15909 case WM_T_PCH_LPT: 15910 case WM_T_PCH_SPT: 15911 case WM_T_PCH_CNP: 15912 wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval); 15913 phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU); 15914 if (wm_phy_resetisblocked(sc) == false) 15915 phyval |= HV_OEM_BITS_ANEGNOW; 15916 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval); 15917 break; 15918 default: 15919 break; 15920 } 15921 } 15922 15923 /* EEE */ 15924 15925 static int 15926 wm_set_eee_i350(struct wm_softc *sc) 15927 { 15928 struct ethercom *ec = &sc->sc_ethercom; 15929 uint32_t ipcnfg, eeer; 15930 uint32_t ipcnfg_mask 15931 = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE; 15932 uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC; 15933 15934 KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER); 15935 15936 ipcnfg = CSR_READ(sc, WMREG_IPCNFG); 15937 eeer = CSR_READ(sc, WMREG_EEER); 15938 15939 /* Enable or disable per user setting */ 15940 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) { 15941 ipcnfg |= ipcnfg_mask; 15942 eeer |= eeer_mask; 15943 } else { 15944 ipcnfg &= ~ipcnfg_mask; 15945 eeer &= ~eeer_mask; 15946 } 15947 15948 CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg); 15949 CSR_WRITE(sc, WMREG_EEER, eeer); 15950 CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */ 15951 CSR_READ(sc, WMREG_EEER); /* XXX flush? 
*/ 15952 15953 return 0; 15954 } 15955 15956 static int 15957 wm_set_eee_pchlan(struct wm_softc *sc) 15958 { 15959 device_t dev = sc->sc_dev; 15960 struct ethercom *ec = &sc->sc_ethercom; 15961 uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data; 15962 int rv = 0; 15963 15964 switch (sc->sc_phytype) { 15965 case WMPHY_82579: 15966 lpa = I82579_EEE_LP_ABILITY; 15967 pcs_status = I82579_EEE_PCS_STATUS; 15968 adv_addr = I82579_EEE_ADVERTISEMENT; 15969 break; 15970 case WMPHY_I217: 15971 lpa = I217_EEE_LP_ABILITY; 15972 pcs_status = I217_EEE_PCS_STATUS; 15973 adv_addr = I217_EEE_ADVERTISEMENT; 15974 break; 15975 default: 15976 return 0; 15977 } 15978 15979 if (sc->phy.acquire(sc)) { 15980 device_printf(dev, "%s: failed to get semaphore\n", __func__); 15981 return 0; 15982 } 15983 15984 rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl); 15985 if (rv != 0) 15986 goto release; 15987 15988 /* Clear bits that enable EEE in various speeds */ 15989 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE; 15990 15991 if ((ec->ec_capenable & ETHERCAP_EEE) != 0) { 15992 /* Save off link partner's EEE ability */ 15993 rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability); 15994 if (rv != 0) 15995 goto release; 15996 15997 /* Read EEE advertisement */ 15998 if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0) 15999 goto release; 16000 16001 /* 16002 * Enable EEE only for speeds in which the link partner is 16003 * EEE capable and for which we advertise EEE. 16004 */ 16005 if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T) 16006 lpi_ctrl |= I82579_LPI_CTRL_EN_1000; 16007 if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) { 16008 sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data); 16009 if ((data & ANLPAR_TX_FD) != 0) 16010 lpi_ctrl |= I82579_LPI_CTRL_EN_100; 16011 else { 16012 /* 16013 * EEE is not supported in 100Half, so ignore 16014 * partner's EEE in 100 ability if full-duplex 16015 * is not advertised. 16016 */ 16017 sc->eee_lp_ability 16018 &= ~AN_EEEADVERT_100_TX; 16019 } 16020 } 16021 } 16022 16023 if (sc->sc_phytype == WMPHY_82579) { 16024 rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data); 16025 if (rv != 0) 16026 goto release; 16027 16028 data &= ~I82579_LPI_PLL_SHUT_100; 16029 rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data); 16030 } 16031 16032 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ 16033 if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0) 16034 goto release; 16035 16036 rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl); 16037 release: 16038 sc->phy.release(sc); 16039 16040 return rv; 16041 } 16042 16043 static int 16044 wm_set_eee(struct wm_softc *sc) 16045 { 16046 struct ethercom *ec = &sc->sc_ethercom; 16047 16048 if ((ec->ec_capabilities & ETHERCAP_EEE) == 0) 16049 return 0; 16050 16051 if (sc->sc_type == WM_T_I354) { 16052 /* I354 uses an external PHY */ 16053 return 0; /* not yet */ 16054 } else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)) 16055 return wm_set_eee_i350(sc); 16056 else if (sc->sc_type >= WM_T_PCH2) 16057 return wm_set_eee_pchlan(sc); 16058 16059 return 0; 16060 } 16061 16062 /* 16063 * Workarounds (mainly PHY related). 16064 * Basically, PHY's workarounds are in the PHY drivers. 
16065 */ 16066 16067 /* Work-around for 82566 Kumeran PCS lock loss */ 16068 static int 16069 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc) 16070 { 16071 struct mii_data *mii = &sc->sc_mii; 16072 uint32_t status = CSR_READ(sc, WMREG_STATUS); 16073 int i, reg, rv; 16074 uint16_t phyreg; 16075 16076 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16077 device_xname(sc->sc_dev), __func__)); 16078 16079 /* If the link is not up, do nothing */ 16080 if ((status & STATUS_LU) == 0) 16081 return 0; 16082 16083 /* Nothing to do if the link is other than 1Gbps */ 16084 if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000) 16085 return 0; 16086 16087 for (i = 0; i < 10; i++) { 16088 /* read twice */ 16089 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg); 16090 if (rv != 0) 16091 return rv; 16092 rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg); 16093 if (rv != 0) 16094 return rv; 16095 16096 if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0) 16097 goto out; /* GOOD! */ 16098 16099 /* Reset the PHY */ 16100 wm_reset_phy(sc); 16101 delay(5*1000); 16102 } 16103 16104 /* Disable GigE link negotiation */ 16105 reg = CSR_READ(sc, WMREG_PHY_CTRL); 16106 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS; 16107 CSR_WRITE(sc, WMREG_PHY_CTRL, reg); 16108 16109 /* 16110 * Call gig speed drop workaround on Gig disable before accessing 16111 * any PHY registers. 16112 */ 16113 wm_gig_downshift_workaround_ich8lan(sc); 16114 16115 out: 16116 return 0; 16117 } 16118 16119 /* 16120 * wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working 16121 * @sc: pointer to the HW structure 16122 * 16123 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), 16124 * LPLU, Gig disable, MDIC PHY reset): 16125 * 1) Set Kumeran Near-end loopback 16126 * 2) Clear Kumeran Near-end loopback 16127 * Should only be called for ICH8[m] devices with any 1G Phy. 16128 */ 16129 static void 16130 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc) 16131 { 16132 uint16_t kmreg; 16133 16134 /* Only for igp3 */ 16135 if (sc->sc_phytype == WMPHY_IGP_3) { 16136 if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0) 16137 return; 16138 kmreg |= KUMCTRLSTA_DIAG_NELPBK; 16139 if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0) 16140 return; 16141 kmreg &= ~KUMCTRLSTA_DIAG_NELPBK; 16142 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg); 16143 } 16144 } 16145 16146 /* 16147 * Workaround for pch's PHYs 16148 * XXX should be moved to new PHY driver? 
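 *
 * For the PCH PHYs this amounts to: MDIO slow mode first (82577),
 * disabling early preamble generation on early 82577/82578 revisions,
 * soft-resetting the 82578 (rev < 2) back to the default BMCR value,
 * re-selecting page 0, running the K1 workaround assuming link is up,
 * masking BM_PORT_GEN_CFG down to its low byte for the busy-hub
 * half-duplex issue, and raising the MSE threshold.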
16149 */ 16150 static int 16151 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc) 16152 { 16153 device_t dev = sc->sc_dev; 16154 struct mii_data *mii = &sc->sc_mii; 16155 struct mii_softc *child; 16156 uint16_t phy_data, phyrev = 0; 16157 int phytype = sc->sc_phytype; 16158 int rv; 16159 16160 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16161 device_xname(dev), __func__)); 16162 KASSERT(sc->sc_type == WM_T_PCH); 16163 16164 /* Set MDIO slow mode before any other MDIO access */ 16165 if (phytype == WMPHY_82577) 16166 if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0) 16167 return rv; 16168 16169 child = LIST_FIRST(&mii->mii_phys); 16170 if (child != NULL) 16171 phyrev = child->mii_mpd_rev; 16172 16173 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/ 16174 if ((child != NULL) && 16175 (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) || 16176 ((phytype == WMPHY_82578) && (phyrev == 1)))) { 16177 /* Disable generation of early preamble (0x4431) */ 16178 rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL, 16179 &phy_data); 16180 if (rv != 0) 16181 return rv; 16182 phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE | 16183 BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE); 16184 rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL, 16185 phy_data); 16186 if (rv != 0) 16187 return rv; 16188 16189 /* Preamble tuning for SSC */ 16190 rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204); 16191 if (rv != 0) 16192 return rv; 16193 } 16194 16195 /* 82578 */ 16196 if (phytype == WMPHY_82578) { 16197 /* 16198 * Return registers to default by doing a soft reset then 16199 * writing 0x3140 to the control register 16200 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 16201 */ 16202 if ((child != NULL) && (phyrev < 2)) { 16203 PHY_RESET(child); 16204 rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140); 16205 if (rv != 0) 16206 return rv; 16207 } 16208 } 16209 16210 /* Select page 0 */ 16211 if ((rv = sc->phy.acquire(sc)) != 0) 16212 return rv; 16213 rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0); 16214 sc->phy.release(sc); 16215 if (rv != 0) 16216 return rv; 16217 16218 /* 16219 * Configure the K1 Si workaround during phy reset assuming there is 16220 * link so that it disables K1 if link is in 1Gbps. 
16221 */ 16222 if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0) 16223 return rv; 16224 16225 /* Workaround for link disconnects on a busy hub in half duplex */ 16226 rv = sc->phy.acquire(sc); 16227 if (rv) 16228 return rv; 16229 rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data); 16230 if (rv) 16231 goto release; 16232 rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG, 16233 phy_data & 0x00ff); 16234 if (rv) 16235 goto release; 16236 16237 /* Set MSE higher to enable link to stay up when noise is high */ 16238 rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034); 16239 release: 16240 sc->phy.release(sc); 16241 16242 return rv; 16243 } 16244 16245 /* 16246 * wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY 16247 * @sc: pointer to the HW structure 16248 */ 16249 static void 16250 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc) 16251 { 16252 16253 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16254 device_xname(sc->sc_dev), __func__)); 16255 16256 if (sc->phy.acquire(sc) != 0) 16257 return; 16258 16259 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc); 16260 16261 sc->phy.release(sc); 16262 } 16263 16264 static void 16265 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc) 16266 { 16267 device_t dev = sc->sc_dev; 16268 uint32_t mac_reg; 16269 uint16_t i, wuce; 16270 int count; 16271 16272 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16273 device_xname(dev), __func__)); 16274 16275 if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0) 16276 return; 16277 16278 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ 16279 count = wm_rar_count(sc); 16280 for (i = 0; i < count; i++) { 16281 uint16_t lo, hi; 16282 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i)); 16283 lo = (uint16_t)(mac_reg & 0xffff); 16284 hi = (uint16_t)((mac_reg >> 16) & 0xffff); 16285 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true); 16286 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true); 16287 16288 mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i)); 16289 lo = (uint16_t)(mac_reg & 0xffff); 16290 hi = (uint16_t)((mac_reg & RAL_AV) >> 16); 16291 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true); 16292 wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true); 16293 } 16294 16295 wm_disable_phy_wakeup_reg_access_bm(dev, &wuce); 16296 } 16297 16298 /* 16299 * wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation 16300 * with 82579 PHY 16301 * @enable: flag to enable/disable workaround when enabling/disabling jumbos 16302 */ 16303 static int 16304 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable) 16305 { 16306 device_t dev = sc->sc_dev; 16307 int rar_count; 16308 int rv; 16309 uint32_t mac_reg; 16310 uint16_t dft_ctrl, data; 16311 uint16_t i; 16312 16313 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16314 device_xname(dev), __func__)); 16315 16316 if (sc->sc_type < WM_T_PCH2) 16317 return 0; 16318 16319 /* Acquire PHY semaphore */ 16320 rv = sc->phy.acquire(sc); 16321 if (rv != 0) 16322 return rv; 16323 16324 /* Disable Rx path while enabling/disabling workaround */ 16325 rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl); 16326 if (rv != 0) 16327 goto out; 16328 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL, 16329 dft_ctrl | (1 << 14)); 16330 if (rv != 0) 16331 goto out; 16332 16333 if (enable) { 16334 /* Write Rx addresses (rar_entry_count for RAL/H, and 16335 * SHRAL/H) and initial CRC values to the MAC 16336 */ 16337 rar_count = wm_rar_count(sc); 16338 for (i = 0; i < rar_count; i++) { 
16339 uint8_t mac_addr[ETHER_ADDR_LEN] = {0}; 16340 uint32_t addr_high, addr_low; 16341 16342 addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i)); 16343 if (!(addr_high & RAL_AV)) 16344 continue; 16345 addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i)); 16346 mac_addr[0] = (addr_low & 0xFF); 16347 mac_addr[1] = ((addr_low >> 8) & 0xFF); 16348 mac_addr[2] = ((addr_low >> 16) & 0xFF); 16349 mac_addr[3] = ((addr_low >> 24) & 0xFF); 16350 mac_addr[4] = (addr_high & 0xFF); 16351 mac_addr[5] = ((addr_high >> 8) & 0xFF); 16352 16353 CSR_WRITE(sc, WMREG_PCH_RAICC(i), 16354 ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN)); 16355 } 16356 16357 /* Write Rx addresses to the PHY */ 16358 wm_copy_rx_addrs_to_phy_ich8lan_locked(sc); 16359 } 16360 16361 /* 16362 * If enable == 16363 * true: Enable jumbo frame workaround in the MAC. 16364 * false: Write MAC register values back to h/w defaults. 16365 */ 16366 mac_reg = CSR_READ(sc, WMREG_FFLT_DBG); 16367 if (enable) { 16368 mac_reg &= ~(1 << 14); 16369 mac_reg |= (7 << 15); 16370 } else 16371 mac_reg &= ~(0xf << 14); 16372 CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg); 16373 16374 mac_reg = CSR_READ(sc, WMREG_RCTL); 16375 if (enable) { 16376 mac_reg |= RCTL_SECRC; 16377 sc->sc_rctl |= RCTL_SECRC; 16378 sc->sc_flags |= WM_F_CRC_STRIP; 16379 } else { 16380 mac_reg &= ~RCTL_SECRC; 16381 sc->sc_rctl &= ~RCTL_SECRC; 16382 sc->sc_flags &= ~WM_F_CRC_STRIP; 16383 } 16384 CSR_WRITE(sc, WMREG_RCTL, mac_reg); 16385 16386 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data); 16387 if (rv != 0) 16388 goto out; 16389 if (enable) 16390 data |= 1 << 0; 16391 else 16392 data &= ~(1 << 0); 16393 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data); 16394 if (rv != 0) 16395 goto out; 16396 16397 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data); 16398 if (rv != 0) 16399 goto out; 16400 /* 16401 * XXX FreeBSD and Linux do the same thing that they set the same value 16402 * on both the enable case and the disable case. Is it correct? 16403 */ 16404 data &= ~(0xf << 8); 16405 data |= (0xb << 8); 16406 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data); 16407 if (rv != 0) 16408 goto out; 16409 16410 /* 16411 * If enable == 16412 * true: Enable jumbo frame workaround in the PHY. 16413 * false: Write PHY register values back to h/w defaults. 16414 */ 16415 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data); 16416 if (rv != 0) 16417 goto out; 16418 data &= ~(0x7F << 5); 16419 if (enable) 16420 data |= (0x37 << 5); 16421 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data); 16422 if (rv != 0) 16423 goto out; 16424 16425 rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data); 16426 if (rv != 0) 16427 goto out; 16428 if (enable) 16429 data &= ~(1 << 13); 16430 else 16431 data |= (1 << 13); 16432 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data); 16433 if (rv != 0) 16434 goto out; 16435 16436 rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data); 16437 if (rv != 0) 16438 goto out; 16439 data &= ~(0x3FF << 2); 16440 if (enable) 16441 data |= (I82579_TX_PTR_GAP << 2); 16442 else 16443 data |= (0x8 << 2); 16444 rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data); 16445 if (rv != 0) 16446 goto out; 16447 16448 rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23), 16449 enable ? 
0xf100 : 0x7e00); 16450 if (rv != 0) 16451 goto out; 16452 16453 rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data); 16454 if (rv != 0) 16455 goto out; 16456 if (enable) 16457 data |= 1 << 10; 16458 else 16459 data &= ~(1 << 10); 16460 rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data); 16461 if (rv != 0) 16462 goto out; 16463 16464 /* Re-enable Rx path after enabling/disabling workaround */ 16465 rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL, 16466 dft_ctrl & ~(1 << 14)); 16467 16468 out: 16469 sc->phy.release(sc); 16470 16471 return rv; 16472 } 16473 16474 /* 16475 * wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be 16476 * done after every PHY reset. 16477 */ 16478 static int 16479 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc) 16480 { 16481 device_t dev = sc->sc_dev; 16482 int rv; 16483 16484 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16485 device_xname(dev), __func__)); 16486 KASSERT(sc->sc_type == WM_T_PCH2); 16487 16488 /* Set MDIO slow mode before any other MDIO access */ 16489 rv = wm_set_mdio_slow_mode_hv(sc); 16490 if (rv != 0) 16491 return rv; 16492 16493 rv = sc->phy.acquire(sc); 16494 if (rv != 0) 16495 return rv; 16496 /* Set MSE higher to enable link to stay up when noise is high */ 16497 rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034); 16498 if (rv != 0) 16499 goto release; 16500 /* Drop link after 5 times MSE threshold was reached */ 16501 rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005); 16502 release: 16503 sc->phy.release(sc); 16504 16505 return rv; 16506 } 16507 16508 /** 16509 * wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP 16510 * @link: link up bool flag 16511 * 16512 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications 16513 * preventing further DMA write requests. Workaround the issue by disabling 16514 * the de-assertion of the clock request when in 1Gpbs mode. 16515 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link 16516 * speeds in order to avoid Tx hangs. 
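 *
 * Concretely (a summary of the code below): with link up at 1G the
 * K1 enable bit is cleared in the Kumeran K1_CONFIG register and
 * FEXTNVM6_REQ_PLL_CLK is set.  Otherwise REQ_PLL_CLK is cleared,
 * and for 100Half and 10Mb/s links the inband link-status Tx timeout
 * is programmed to 5 x 10us resp. 50 x 10us, with the K1 entry
 * latency extended only in the 10Mb/s case.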
16517 **/ 16518 static int 16519 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link) 16520 { 16521 uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6); 16522 uint32_t status = CSR_READ(sc, WMREG_STATUS); 16523 uint32_t speed = __SHIFTOUT(status, STATUS_SPEED); 16524 uint16_t phyreg; 16525 16526 if (link && (speed == STATUS_SPEED_1000)) { 16527 sc->phy.acquire(sc); 16528 int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, 16529 &phyreg); 16530 if (rv != 0) 16531 goto release; 16532 rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, 16533 phyreg & ~KUMCTRLSTA_K1_ENABLE); 16534 if (rv != 0) 16535 goto release; 16536 delay(20); 16537 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK); 16538 16539 rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, 16540 &phyreg); 16541 release: 16542 sc->phy.release(sc); 16543 return rv; 16544 } 16545 16546 fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK; 16547 16548 struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys); 16549 if (((child != NULL) && (child->mii_mpd_rev > 5)) 16550 || !link 16551 || ((speed == STATUS_SPEED_100) && (status & STATUS_FD))) 16552 goto update_fextnvm6; 16553 16554 wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg); 16555 16556 /* Clear link status transmit timeout */ 16557 phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; 16558 if (speed == STATUS_SPEED_100) { 16559 /* Set inband Tx timeout to 5x10us for 100Half */ 16560 phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; 16561 16562 /* Do not extend the K1 entry latency for 100Half */ 16563 fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; 16564 } else { 16565 /* Set inband Tx timeout to 50x10us for 10Full/Half */ 16566 phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; 16567 16568 /* Extend the K1 entry latency for 10 Mbps */ 16569 fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; 16570 } 16571 16572 wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg); 16573 16574 update_fextnvm6: 16575 CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6); 16576 return 0; 16577 } 16578 16579 /* 16580 * wm_k1_gig_workaround_hv - K1 Si workaround 16581 * @sc: pointer to the HW structure 16582 * @link: link up bool flag 16583 * 16584 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning 16585 * from a lower speed. This workaround disables K1 whenever link is at 1Gig 16586 * If link is down, the function will restore the default K1 setting located 16587 * in the NVM. 
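 *
 * (In the code below the default comes from sc->sc_nvm_k1_enabled;
 * it is overridden to "disabled" whenever the link argument is true,
 * and IGP3_KMRN_DIAG is written with different link-stall fix values
 * for the link-up and link-down cases.)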
16588 */ 16589 static int 16590 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link) 16591 { 16592 int k1_enable = sc->sc_nvm_k1_enabled; 16593 16594 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16595 device_xname(sc->sc_dev), __func__)); 16596 16597 if (sc->phy.acquire(sc) != 0) 16598 return -1; 16599 16600 if (link) { 16601 k1_enable = 0; 16602 16603 /* Link stall fix for link up */ 16604 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 16605 0x0100); 16606 } else { 16607 /* Link stall fix for link down */ 16608 wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 16609 0x4100); 16610 } 16611 16612 wm_configure_k1_ich8lan(sc, k1_enable); 16613 sc->phy.release(sc); 16614 16615 return 0; 16616 } 16617 16618 /* 16619 * wm_k1_workaround_lv - K1 Si workaround 16620 * @sc: pointer to the HW structure 16621 * 16622 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps 16623 * Disable K1 for 1000 and 100 speeds 16624 */ 16625 static int 16626 wm_k1_workaround_lv(struct wm_softc *sc) 16627 { 16628 uint32_t reg; 16629 uint16_t phyreg; 16630 int rv; 16631 16632 if (sc->sc_type != WM_T_PCH2) 16633 return 0; 16634 16635 /* Set K1 beacon duration based on 10Mbps speed */ 16636 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg); 16637 if (rv != 0) 16638 return rv; 16639 16640 if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 16641 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 16642 if (phyreg & 16643 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { 16644 /* LV 1G/100 Packet drop issue wa */ 16645 rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL, 16646 &phyreg); 16647 if (rv != 0) 16648 return rv; 16649 phyreg &= ~HV_PM_CTRL_K1_ENA; 16650 rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, 16651 phyreg); 16652 if (rv != 0) 16653 return rv; 16654 } else { 16655 /* For 10Mbps */ 16656 reg = CSR_READ(sc, WMREG_FEXTNVM4); 16657 reg &= ~FEXTNVM4_BEACON_DURATION; 16658 reg |= FEXTNVM4_BEACON_DURATION_16US; 16659 CSR_WRITE(sc, WMREG_FEXTNVM4, reg); 16660 } 16661 } 16662 16663 return 0; 16664 } 16665 16666 /* 16667 * wm_link_stall_workaround_hv - Si workaround 16668 * @sc: pointer to the HW structure 16669 * 16670 * This function works around a Si bug where the link partner can get 16671 * a link up indication before the PHY does. If small packets are sent 16672 * by the link partner they can be placed in the packet buffer without 16673 * being properly accounted for by the PHY and will stall preventing 16674 * further packets from being received. The workaround is to clear the 16675 * packet buffer after the PHY detects link up. 
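 *
 * (The flush below is done by writing HV_MUX_DATA_CTRL with
 * FORCE_SPEED set and then without it, after a rather long 200ms
 * settle delay; see the XXX.)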
 */
static int
wm_link_stall_workaround_hv(struct wm_softc *sc)
{
	uint16_t phyreg;

	if (sc->sc_phytype != WMPHY_82578)
		return 0;

	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
	if ((phyreg & BMCR_LOOP) != 0)
		return 0;

	/* Check if link is up and at 1Gbps */
	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_MASK;
	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
		| BM_CS_STATUS_SPEED_1000))
		return 0;

	delay(200 * 1000);	/* XXX too big */

	/* Flush the packets in the fifo buffer */
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
	    HV_MUX_DATA_CTRL_GEN_TO_MAC);

	return 0;
}

static int
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	int rv;
	uint16_t reg;

	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
	if (rv != 0)
		return rv;

	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

/*
 * wm_configure_k1_ich8lan - Configure K1 power state
 * @sc: pointer to the HW structure
 * @k1_enable: K1 state to configure
 *
 * Configure the K1 power state based on the provided parameter.
 * Assumes semaphore already acquired.
 */
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmreg;
	int rv;

	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);

	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
	if (rv != 0)
		return;

	if (k1_enable)
		kmreg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmreg &= ~KUMCTRLSTA_K1_ENABLE;

	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
	if (rv != 0)
		return;

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	return;
}

/* special case - for 82575 - need to do manual init ...
*/ 16775 static void 16776 wm_reset_init_script_82575(struct wm_softc *sc) 16777 { 16778 /* 16779 * Remark: this is untested code - we have no board without EEPROM 16780 * same setup as mentioned int the FreeBSD driver for the i82575 16781 */ 16782 16783 /* SerDes configuration via SERDESCTRL */ 16784 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c); 16785 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78); 16786 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23); 16787 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15); 16788 16789 /* CCM configuration via CCMCTL register */ 16790 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00); 16791 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00); 16792 16793 /* PCIe lanes configuration */ 16794 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec); 16795 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf); 16796 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05); 16797 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81); 16798 16799 /* PCIe PLL Configuration */ 16800 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47); 16801 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00); 16802 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00); 16803 } 16804 16805 static void 16806 wm_reset_mdicnfg_82580(struct wm_softc *sc) 16807 { 16808 uint32_t reg; 16809 uint16_t nvmword; 16810 int rv; 16811 16812 if (sc->sc_type != WM_T_82580) 16813 return; 16814 if ((sc->sc_flags & WM_F_SGMII) == 0) 16815 return; 16816 16817 rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) 16818 + NVM_OFF_CFG3_PORTA, 1, &nvmword); 16819 if (rv != 0) { 16820 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n", 16821 __func__); 16822 return; 16823 } 16824 16825 reg = CSR_READ(sc, WMREG_MDICNFG); 16826 if (nvmword & NVM_CFG3_PORTA_EXT_MDIO) 16827 reg |= MDICNFG_DEST; 16828 if (nvmword & NVM_CFG3_PORTA_COM_MDIO) 16829 reg |= MDICNFG_COM_MDIO; 16830 CSR_WRITE(sc, WMREG_MDICNFG, reg); 16831 } 16832 16833 #define MII_INVALIDID(x) (((x) == 0x0000) || ((x) == 0xffff)) 16834 16835 static bool 16836 wm_phy_is_accessible_pchlan(struct wm_softc *sc) 16837 { 16838 uint32_t reg; 16839 uint16_t id1, id2; 16840 int i, rv; 16841 16842 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16843 device_xname(sc->sc_dev), __func__)); 16844 KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP); 16845 16846 id1 = id2 = 0xffff; 16847 for (i = 0; i < 2; i++) { 16848 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1, 16849 &id1); 16850 if ((rv != 0) || MII_INVALIDID(id1)) 16851 continue; 16852 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2, 16853 &id2); 16854 if ((rv != 0) || MII_INVALIDID(id2)) 16855 continue; 16856 break; 16857 } 16858 if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2)) 16859 goto out; 16860 16861 /* 16862 * In case the PHY needs to be in mdio slow mode, 16863 * set slow mode and try to get the PHY id again. 
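 *
 * (The slow-mode retry below is only attempted for chips older than
 * PCH_LPT; on PCH_LPT and newer an invalid ID at this point simply
 * makes the function report the PHY as inaccessible.)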
16864 */ 16865 rv = 0; 16866 if (sc->sc_type < WM_T_PCH_LPT) { 16867 sc->phy.release(sc); 16868 wm_set_mdio_slow_mode_hv(sc); 16869 rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1); 16870 rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2); 16871 sc->phy.acquire(sc); 16872 } 16873 if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) { 16874 device_printf(sc->sc_dev, "XXX return with false\n"); 16875 return false; 16876 } 16877 out: 16878 if (sc->sc_type >= WM_T_PCH_LPT) { 16879 /* Only unforce SMBus if ME is not active */ 16880 if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) { 16881 uint16_t phyreg; 16882 16883 /* Unforce SMBus mode in PHY */ 16884 rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, 16885 CV_SMB_CTRL, &phyreg); 16886 phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS; 16887 wm_gmii_hv_writereg_locked(sc->sc_dev, 2, 16888 CV_SMB_CTRL, phyreg); 16889 16890 /* Unforce SMBus mode in MAC */ 16891 reg = CSR_READ(sc, WMREG_CTRL_EXT); 16892 reg &= ~CTRL_EXT_FORCE_SMBUS; 16893 CSR_WRITE(sc, WMREG_CTRL_EXT, reg); 16894 } 16895 } 16896 return true; 16897 } 16898 16899 static void 16900 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc) 16901 { 16902 uint32_t reg; 16903 int i; 16904 16905 /* Set PHY Config Counter to 50msec */ 16906 reg = CSR_READ(sc, WMREG_FEXTNVM3); 16907 reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK; 16908 reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS; 16909 CSR_WRITE(sc, WMREG_FEXTNVM3, reg); 16910 16911 /* Toggle LANPHYPC */ 16912 reg = CSR_READ(sc, WMREG_CTRL); 16913 reg |= CTRL_LANPHYPC_OVERRIDE; 16914 reg &= ~CTRL_LANPHYPC_VALUE; 16915 CSR_WRITE(sc, WMREG_CTRL, reg); 16916 CSR_WRITE_FLUSH(sc); 16917 delay(1000); 16918 reg &= ~CTRL_LANPHYPC_OVERRIDE; 16919 CSR_WRITE(sc, WMREG_CTRL, reg); 16920 CSR_WRITE_FLUSH(sc); 16921 16922 if (sc->sc_type < WM_T_PCH_LPT) 16923 delay(50 * 1000); 16924 else { 16925 i = 20; 16926 16927 do { 16928 delay(5 * 1000); 16929 } while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0) 16930 && i--); 16931 16932 delay(30 * 1000); 16933 } 16934 } 16935 16936 static int 16937 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link) 16938 { 16939 uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ) 16940 | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND; 16941 uint32_t rxa; 16942 uint16_t scale = 0, lat_enc = 0; 16943 int32_t obff_hwm = 0; 16944 int64_t lat_ns, value; 16945 16946 DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n", 16947 device_xname(sc->sc_dev), __func__)); 16948 16949 if (link) { 16950 uint16_t max_snoop, max_nosnoop, max_ltr_enc; 16951 uint32_t status; 16952 uint16_t speed; 16953 pcireg_t preg; 16954 16955 status = CSR_READ(sc, WMREG_STATUS); 16956 switch (__SHIFTOUT(status, STATUS_SPEED)) { 16957 case STATUS_SPEED_10: 16958 speed = 10; 16959 break; 16960 case STATUS_SPEED_100: 16961 speed = 100; 16962 break; 16963 case STATUS_SPEED_1000: 16964 speed = 1000; 16965 break; 16966 default: 16967 device_printf(sc->sc_dev, "Unknown speed " 16968 "(status = %08x)\n", status); 16969 return -1; 16970 } 16971 16972 /* Rx Packet Buffer Allocation size (KB) */ 16973 rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK; 16974 16975 /* 16976 * Determine the maximum latency tolerated by the device. 16977 * 16978 * Per the PCIe spec, the tolerated latencies are encoded as 16979 * a 3-bit encoded scale (only 0-5 are valid) multiplied by 16980 * a 10-bit value (0-1023) to provide a range from 1 ns to 16981 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, 16982 * 1=2^5ns, 2=2^10ns,...5=2^25ns. 
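		 *
		 * A worked example (illustrative numbers only) of the
		 * encoding loop below: for lat_ns = 600,000 the 10-bit
		 * value field would overflow, so the value is divided
		 * (rounding up) by 2^5 once per scale step:
		 *   600000 -> 18750 (scale 1) -> 586 (scale 2)
		 * and lat_enc then encodes scale 2 / value 586, i.e.
		 * about 586 * 2^10 ns ~ 0.6 ms.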
16983 */ 16984 lat_ns = ((int64_t)rxa * 1024 - 16985 (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu 16986 + ETHER_HDR_LEN))) * 8 * 1000; 16987 if (lat_ns < 0) 16988 lat_ns = 0; 16989 else 16990 lat_ns /= speed; 16991 value = lat_ns; 16992 16993 while (value > LTRV_VALUE) { 16994 scale ++; 16995 value = howmany(value, __BIT(5)); 16996 } 16997 if (scale > LTRV_SCALE_MAX) { 16998 device_printf(sc->sc_dev, 16999 "Invalid LTR latency scale %d\n", scale); 17000 return -1; 17001 } 17002 lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value); 17003 17004 /* Determine the maximum latency tolerated by the platform */ 17005 preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 17006 WM_PCI_LTR_CAP_LPT); 17007 max_snoop = preg & 0xffff; 17008 max_nosnoop = preg >> 16; 17009 17010 max_ltr_enc = MAX(max_snoop, max_nosnoop); 17011 17012 if (lat_enc > max_ltr_enc) { 17013 lat_enc = max_ltr_enc; 17014 lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL) 17015 * PCI_LTR_SCALETONS( 17016 __SHIFTOUT(lat_enc, 17017 PCI_LTR_MAXSNOOPLAT_SCALE)); 17018 } 17019 17020 if (lat_ns) { 17021 lat_ns *= speed * 1000; 17022 lat_ns /= 8; 17023 lat_ns /= 1000000000; 17024 obff_hwm = (int32_t)(rxa - lat_ns); 17025 } 17026 if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) { 17027 device_printf(sc->sc_dev, "Invalid high water mark %d" 17028 "(rxa = %d, lat_ns = %d)\n", 17029 obff_hwm, (int32_t)rxa, (int32_t)lat_ns); 17030 return -1; 17031 } 17032 } 17033 /* Snoop and No-Snoop latencies the same */ 17034 reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP); 17035 CSR_WRITE(sc, WMREG_LTRV, reg); 17036 17037 /* Set OBFF high water mark */ 17038 reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM; 17039 reg |= obff_hwm; 17040 CSR_WRITE(sc, WMREG_SVT, reg); 17041 17042 /* Enable OBFF */ 17043 reg = CSR_READ(sc, WMREG_SVCR); 17044 reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT; 17045 CSR_WRITE(sc, WMREG_SVCR, reg); 17046 17047 return 0; 17048 } 17049 17050 /* 17051 * I210 Errata 25 and I211 Errata 10 17052 * Slow System Clock. 17053 * 17054 * Note that this function is called on both FLASH and iNVM case on NetBSD. 17055 */ 17056 static int 17057 wm_pll_workaround_i210(struct wm_softc *sc) 17058 { 17059 uint32_t mdicnfg, wuc; 17060 uint32_t reg; 17061 pcireg_t pcireg; 17062 uint32_t pmreg; 17063 uint16_t nvmword, tmp_nvmword; 17064 uint16_t phyval; 17065 bool wa_done = false; 17066 int i, rv = 0; 17067 17068 /* Get Power Management cap offset */ 17069 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT, 17070 &pmreg, NULL) == 0) 17071 return -1; 17072 17073 /* Save WUC and MDICNFG registers */ 17074 wuc = CSR_READ(sc, WMREG_WUC); 17075 mdicnfg = CSR_READ(sc, WMREG_MDICNFG); 17076 17077 reg = mdicnfg & ~MDICNFG_DEST; 17078 CSR_WRITE(sc, WMREG_MDICNFG, reg); 17079 17080 if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) { 17081 /* 17082 * The default value of the Initialization Control Word 1 17083 * is the same on both I210's FLASH_HW and I21[01]'s iNVM. 
 */
		nvmword = INVM_DEFAULT_AL;
	}
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}

/* Sysctl functions */
static int
wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
	struct wm_softc *sc = txq->txq_sc;
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
	node.sysctl_data = &reg;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

#ifdef WM_DEBUG
static int
wm_sysctl_debug(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
	uint32_t dflags;
	int error;

	dflags = sc->sc_debug;
	node.sysctl_data = &dflags;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));

	if (error || newp == NULL)
		return error;

	sc->sc_debug = dflags;
	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));

	return 0;
}
#endif