1 /*	$NetBSD: if_wm.c,v 1.374 2015/10/22 09:51:21 knakahara Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- LPLU other than PCH*
77  *	- TX Multi queue
78  *	- EEE (Energy-Efficient Ethernet)
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  *	- Image Unique ID
83  */
84 
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.374 2015/10/22 09:51:21 knakahara Exp $");
87 
88 #ifdef _KERNEL_OPT
89 #include "opt_net_mpsafe.h"
90 #endif
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kmem.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/queue.h>
104 #include <sys/syslog.h>
105 #include <sys/interrupt.h>
106 
107 #include <sys/rndsource.h>
108 
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_media.h>
112 #include <net/if_ether.h>
113 
114 #include <net/bpf.h>
115 
116 #include <netinet/in.h>			/* XXX for struct ip */
117 #include <netinet/in_systm.h>		/* XXX for struct ip */
118 #include <netinet/ip.h>			/* XXX for struct ip */
119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
121 
122 #include <sys/bus.h>
123 #include <sys/intr.h>
124 #include <machine/endian.h>
125 
126 #include <dev/mii/mii.h>
127 #include <dev/mii/miivar.h>
128 #include <dev/mii/miidevs.h>
129 #include <dev/mii/mii_bitbang.h>
130 #include <dev/mii/ikphyreg.h>
131 #include <dev/mii/igphyreg.h>
132 #include <dev/mii/igphyvar.h>
133 #include <dev/mii/inbmphyreg.h>
134 
135 #include <dev/pci/pcireg.h>
136 #include <dev/pci/pcivar.h>
137 #include <dev/pci/pcidevs.h>
138 
139 #include <dev/pci/if_wmreg.h>
140 #include <dev/pci/if_wmvar.h>
141 
142 #ifdef WM_DEBUG
143 #define	WM_DEBUG_LINK		0x01
144 #define	WM_DEBUG_TX		0x02
145 #define	WM_DEBUG_RX		0x04
146 #define	WM_DEBUG_GMII		0x08
147 #define	WM_DEBUG_MANAGE		0x10
148 #define	WM_DEBUG_NVM		0x20
149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
151 
152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
153 #else
154 #define	DPRINTF(x, y)	/* nothing */
155 #endif /* WM_DEBUG */
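
/*
 * Usage sketch (illustrative, not compiled in): the second DPRINTF()
 * argument carries its own parentheses so the macro can pass a whole
 * printf argument list through unchanged.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
		device_xname(sc->sc_dev)));
#endif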
156 
157 #ifdef NET_MPSAFE
158 #define WM_MPSAFE	1
159 #endif
160 
161 /*
162  * The maximum number of interrupts this device driver supports.
163  */
164 #define WM_MAX_NTXINTR		16
165 #define WM_MAX_NRXINTR		16
166 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
167 
168 /*
169  * Transmit descriptor list size.  Due to errata, we can only have
170  * 256 hardware descriptors in the ring on < 82544, but we use 4096
171  * on >= 82544.  We tell the upper layers that they can queue a lot
172  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
173  * of them at a time.
174  *
175  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
176  * chains containing many small mbufs have been observed in zero-copy
177  * situations with jumbo frames.
178  */
179 #define	WM_NTXSEGS		256
180 #define	WM_IFQUEUELEN		256
181 #define	WM_TXQUEUELEN_MAX	64
182 #define	WM_TXQUEUELEN_MAX_82547	16
183 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
184 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
185 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
186 #define	WM_NTXDESC_82542	256
187 #define	WM_NTXDESC_82544	4096
188 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
189 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
190 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
191 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
192 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
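
/*
 * Illustrative sketch (not compiled in): because WM_NTXDESC() is a power
 * of two, ring indices wrap with a cheap mask instead of a modulo.  The
 * variable "idx" below is hypothetical.
 */
#if 0
	int idx = WM_NTXDESC(txq) - 1;	/* last slot, e.g. 4095 */
	idx = WM_NEXTTX(txq, idx);	/* (4095 + 1) & 4095 == 0: wraps */
#endif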
193 
194 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
195 
196 /*
197  * Receive descriptor list size.  We have one Rx buffer for normal
198  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
199  * packet.  We allocate 256 receive descriptors, each with a 2k
200  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
201  */
202 #define	WM_NRXDESC		256
203 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
204 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
205 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
206 
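/*
 * The legacy and new-queue ("NQ") Tx descriptor formats are the same
 * size, so one union (and one DMA allocation) can back whichever format
 * the chip in use expects.
 */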
207 typedef union txdescs {
208 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
209 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
210 } txdescs_t;
211 
212 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
213 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
214 
215 /*
216  * Software state for transmit jobs.
217  */
218 struct wm_txsoft {
219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
221 	int txs_firstdesc;		/* first descriptor in packet */
222 	int txs_lastdesc;		/* last descriptor in packet */
223 	int txs_ndesc;			/* # of descriptors used */
224 };
225 
226 /*
227  * Software state for receive buffers.  Each descriptor gets a
228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
229  * more than one buffer, we chain them together.
230  */
231 struct wm_rxsoft {
232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
234 };
235 
236 #define WM_LINKUP_TIMEOUT	50
237 
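/* SW/FW semaphore to take for each PHY, indexed by MAC function number */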
238 static uint16_t swfwphysem[] = {
239 	SWFW_PHY0_SM,
240 	SWFW_PHY1_SM,
241 	SWFW_PHY2_SM,
242 	SWFW_PHY3_SM
243 };
244 
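/*
 * 82580 Rx packet buffer sizes, in KB, indexed by the RXPBS register
 * field (see wm_rxpbs_adjust_82580()).
 */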
245 static const uint32_t wm_82580_rxpbs_table[] = {
246 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
247 };
248 
249 struct wm_softc;
250 
251 struct wm_txqueue {
252 	kmutex_t *txq_lock;		/* lock for tx operations */
253 
254 	struct wm_softc *txq_sc;
255 
256 	int txq_id;			/* index of this transmit queue */
257 	int txq_intr_idx;		/* MSI-X table index for this queue */
258 
259 	/* Software state for the transmit descriptors. */
260 	int txq_num;			/* must be a power of two */
261 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
262 
263 	/* TX control data structures. */
264 	int txq_ndesc;			/* must be a power of two */
265 	txdescs_t *txq_descs_u;
266 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
267 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
268 	int txq_desc_rseg;		/* real number of control data segments */
269 	size_t txq_desc_size;		/* control data size */
270 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
271 #define	txq_descs	txq_descs_u->sctxu_txdescs
272 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
273 
274 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
275 
276 	int txq_free;			/* number of free Tx descriptors */
277 	int txq_next;			/* next ready Tx descriptor */
278 
279 	int txq_sfree;			/* number of free Tx jobs */
280 	int txq_snext;			/* next free Tx job */
281 	int txq_sdirty;			/* dirty Tx jobs */
282 
283 	/* These 4 variables are used only on the 82547. */
284 	int txq_fifo_size;		/* Tx FIFO size */
285 	int txq_fifo_head;		/* current head of FIFO */
286 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
287 	int txq_fifo_stall;		/* Tx FIFO is stalled */
288 
289 	/* XXX which event counter is required? */
290 };
291 
292 struct wm_rxqueue {
293 	kmutex_t *rxq_lock;		/* lock for rx operations */
294 
295 	struct wm_softc *rxq_sc;
296 
297 	int rxq_id;			/* index of this receive queue */
298 	int rxq_intr_idx;		/* MSI-X table index for this queue */
299 
300 	/* Software state for the receive descriptors. */
301 	wiseman_rxdesc_t *rxq_descs;
302 
303 	/* RX control data structures. */
304 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
305 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
306 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
307 	int rxq_desc_rseg;		/* real number of control data segments */
308 	size_t rxq_desc_size;		/* control data size */
309 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
310 
311 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
312 
313 	int rxq_ptr;			/* next ready Rx descriptor/queue entry */
314 	int rxq_discard;
315 	int rxq_len;
316 	struct mbuf *rxq_head;
317 	struct mbuf *rxq_tail;
318 	struct mbuf **rxq_tailp;
319 
320 	/* XXX which event counter is required? */
321 };
322 
323 /*
324  * Software state per device.
325  */
326 struct wm_softc {
327 	device_t sc_dev;		/* generic device information */
328 	bus_space_tag_t sc_st;		/* bus space tag */
329 	bus_space_handle_t sc_sh;	/* bus space handle */
330 	bus_size_t sc_ss;		/* bus space size */
331 	bus_space_tag_t sc_iot;		/* I/O space tag */
332 	bus_space_handle_t sc_ioh;	/* I/O space handle */
333 	bus_size_t sc_ios;		/* I/O space size */
334 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
335 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
336 	bus_size_t sc_flashs;		/* flash registers space size */
337 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
338 
339 	struct ethercom sc_ethercom;	/* ethernet common data */
340 	struct mii_data sc_mii;		/* MII/media information */
341 
342 	pci_chipset_tag_t sc_pc;
343 	pcitag_t sc_pcitag;
344 	int sc_bus_speed;		/* PCI/PCIX bus speed */
345 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
346 
347 	uint16_t sc_pcidevid;		/* PCI device ID */
348 	wm_chip_type sc_type;		/* MAC type */
349 	int sc_rev;			/* MAC revision */
350 	wm_phy_type sc_phytype;		/* PHY type */
351 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
352 #define	WM_MEDIATYPE_UNKNOWN		0x00
353 #define	WM_MEDIATYPE_FIBER		0x01
354 #define	WM_MEDIATYPE_COPPER		0x02
355 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
356 	int sc_funcid;			/* unit number of the chip (0 to 3) */
357 	int sc_flags;			/* flags; see below */
358 	int sc_if_flags;		/* last if_flags */
359 	int sc_flowflags;		/* 802.3x flow control flags */
360 	int sc_align_tweak;
361 
362 	void *sc_ihs[WM_MAX_NINTR];	/*
363 					 * interrupt cookie.
364 					 * legacy and msi use sc_ihs[0].
365 					 */
366 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
367 	int sc_nintrs;			/* number of interrupts */
368 
369 	int sc_link_intr_idx;		/* index of MSI-X tables */
370 
371 	callout_t sc_tick_ch;		/* tick callout */
372 	bool sc_stopping;
373 
374 	int sc_nvm_ver_major;
375 	int sc_nvm_ver_minor;
376 	int sc_nvm_ver_build;
377 	int sc_nvm_addrbits;		/* NVM address bits */
378 	unsigned int sc_nvm_wordsize;	/* NVM word size */
379 	int sc_ich8_flash_base;
380 	int sc_ich8_flash_bank_size;
381 	int sc_nvm_k1_enabled;
382 
383 	int sc_ntxqueues;
384 	struct wm_txqueue *sc_txq;
385 
386 	int sc_nrxqueues;
387 	struct wm_rxqueue *sc_rxq;
388 
389 #ifdef WM_EVENT_COUNTERS
390 	/* Event counters. */
391 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
392 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
393 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
394 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
395 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
396 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
397 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
398 
399 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
400 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
401 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
402 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
403 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
404 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
405 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
406 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
407 
408 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
409 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
410 
411 	struct evcnt sc_ev_tu;		/* Tx underrun */
412 
413 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
414 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
415 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
416 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
417 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
418 #endif /* WM_EVENT_COUNTERS */
419 
420 	/* This variable is used only on the 82547. */
421 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
422 
423 	uint32_t sc_ctrl;		/* prototype CTRL register */
424 #if 0
425 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
426 #endif
427 	uint32_t sc_icr;		/* prototype interrupt bits */
428 	uint32_t sc_itr;		/* prototype intr throttling reg */
429 	uint32_t sc_tctl;		/* prototype TCTL register */
430 	uint32_t sc_rctl;		/* prototype RCTL register */
431 	uint32_t sc_txcw;		/* prototype TXCW register */
432 	uint32_t sc_tipg;		/* prototype TIPG register */
433 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
434 	uint32_t sc_pba;		/* prototype PBA register */
435 
436 	int sc_tbi_linkup;		/* TBI link status */
437 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
438 	int sc_tbi_serdes_ticks;	/* tbi ticks */
439 
440 	int sc_mchash_type;		/* multicast filter offset */
441 
442 	krndsource_t rnd_source;	/* random source */
443 
444 	kmutex_t *sc_core_lock;		/* lock for softc operations */
445 };
446 
447 #define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
448 #define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
449 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
450 #define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
451 #define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
452 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
453 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
454 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
455 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
456 
457 #ifdef WM_MPSAFE
458 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
459 #else
460 #define CALLOUT_FLAGS	0
461 #endif
462 
463 #define	WM_RXCHAIN_RESET(rxq)						\
464 do {									\
465 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
466 	*(rxq)->rxq_tailp = NULL;					\
467 	(rxq)->rxq_len = 0;						\
468 } while (/*CONSTCOND*/0)
469 
470 #define	WM_RXCHAIN_LINK(rxq, m)						\
471 do {									\
472 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
473 	(rxq)->rxq_tailp = &(m)->m_next;				\
474 } while (/*CONSTCOND*/0)
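
/*
 * Illustrative sketch (not compiled in): rxq_tailp always points at the
 * m_next slot where the next fragment will be linked, making each append
 * O(1).  "m1" and "m2" are hypothetical mbufs.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* head == NULL, tailp == &head */
	WM_RXCHAIN_LINK(rxq, m1);	/* head == m1, tailp == &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, tail == m2 */
#endif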
475 
476 #ifdef WM_EVENT_COUNTERS
477 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
478 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
479 #else
480 #define	WM_EVCNT_INCR(ev)	/* nothing */
481 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
482 #endif
483 
484 #define	CSR_READ(sc, reg)						\
485 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
486 #define	CSR_WRITE(sc, reg, val)						\
487 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
488 #define	CSR_WRITE_FLUSH(sc)						\
489 	(void) CSR_READ((sc), WMREG_STATUS)
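
/*
 * Usage sketch (illustrative, not compiled in): reading WMREG_STATUS
 * forces posted PCI writes to complete, e.g. before a reset delay:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(10000);
#endif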
490 
491 #define ICH8_FLASH_READ32(sc, reg) \
492 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
493 #define ICH8_FLASH_WRITE32(sc, reg, data) \
494 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
495 
496 #define ICH8_FLASH_READ16(sc, reg) \
497 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
498 #define ICH8_FLASH_WRITE16(sc, reg, data) \
499 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
500 
501 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
502 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
503 
504 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
505 #define	WM_CDTXADDR_HI(txq, x)						\
506 	(sizeof(bus_addr_t) == 8 ?					\
507 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
508 
509 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
510 #define	WM_CDRXADDR_HI(rxq, x)						\
511 	(sizeof(bus_addr_t) == 8 ?					\
512 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
513 
514 /*
515  * Register read/write functions other than CSR_{READ|WRITE}().
517  */
518 #if 0
519 static inline uint32_t wm_io_read(struct wm_softc *, int);
520 #endif
521 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
522 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
523 	uint32_t, uint32_t);
524 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
525 
526 /*
527  * Descriptor sync/init functions.
528  */
529 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
530 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
531 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
532 
533 /*
534  * Device driver interface functions and commonly used functions.
535  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
536  */
537 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
538 static int	wm_match(device_t, cfdata_t, void *);
539 static void	wm_attach(device_t, device_t, void *);
540 static int	wm_detach(device_t, int);
541 static bool	wm_suspend(device_t, const pmf_qual_t *);
542 static bool	wm_resume(device_t, const pmf_qual_t *);
543 static void	wm_watchdog(struct ifnet *);
544 static void	wm_tick(void *);
545 static int	wm_ifflags_cb(struct ethercom *);
546 static int	wm_ioctl(struct ifnet *, u_long, void *);
547 /* MAC address related */
548 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
549 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
550 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
551 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
552 static void	wm_set_filter(struct wm_softc *);
553 /* Reset and init related */
554 static void	wm_set_vlan(struct wm_softc *);
555 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
556 static void	wm_get_auto_rd_done(struct wm_softc *);
557 static void	wm_lan_init_done(struct wm_softc *);
558 static void	wm_get_cfg_done(struct wm_softc *);
559 static void	wm_initialize_hardware_bits(struct wm_softc *);
560 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
561 static void	wm_reset(struct wm_softc *);
562 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
563 static void	wm_rxdrain(struct wm_rxqueue *);
564 static void	wm_rss_getkey(uint8_t *);
565 static void	wm_init_rss(struct wm_softc *);
566 static void	wm_adjust_qnum(struct wm_softc *, int);
567 static int	wm_setup_legacy(struct wm_softc *);
568 static int	wm_setup_msix(struct wm_softc *);
569 static int	wm_init(struct ifnet *);
570 static int	wm_init_locked(struct ifnet *);
571 static void	wm_stop(struct ifnet *, int);
572 static void	wm_stop_locked(struct ifnet *, int);
573 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
574 static void	wm_82547_txfifo_stall(void *);
575 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
576 /* DMA related */
577 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
578 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
579 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
580 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
581 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
582 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
583 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
584 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
585 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
586 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
587 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
588 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
589 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
590 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
591 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
592 static int	wm_alloc_txrx_queues(struct wm_softc *);
593 static void	wm_free_txrx_queues(struct wm_softc *);
594 static int	wm_init_txrx_queues(struct wm_softc *);
595 /* Start */
596 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
597     uint32_t *, uint8_t *);
598 static void	wm_start(struct ifnet *);
599 static void	wm_start_locked(struct ifnet *);
600 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
601     uint32_t *, uint32_t *, bool *);
602 static void	wm_nq_start(struct ifnet *);
603 static void	wm_nq_start_locked(struct ifnet *);
604 /* Interrupt */
605 static int	wm_txeof(struct wm_softc *);
606 static void	wm_rxeof(struct wm_rxqueue *);
607 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
608 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
609 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
610 static void	wm_linkintr(struct wm_softc *, uint32_t);
611 static int	wm_intr_legacy(void *);
612 static int	wm_txintr_msix(void *);
613 static int	wm_rxintr_msix(void *);
614 static int	wm_linkintr_msix(void *);
615 
616 /*
617  * Media related.
618  * GMII, SGMII, TBI, SERDES and SFP.
619  */
620 /* Common */
621 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
622 /* GMII related */
623 static void	wm_gmii_reset(struct wm_softc *);
624 static int	wm_get_phy_id_82575(struct wm_softc *);
625 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
626 static int	wm_gmii_mediachange(struct ifnet *);
627 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
628 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
629 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
630 static int	wm_gmii_i82543_readreg(device_t, int, int);
631 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
632 static int	wm_gmii_i82544_readreg(device_t, int, int);
633 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
634 static int	wm_gmii_i80003_readreg(device_t, int, int);
635 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
636 static int	wm_gmii_bm_readreg(device_t, int, int);
637 static void	wm_gmii_bm_writereg(device_t, int, int, int);
638 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
639 static int	wm_gmii_hv_readreg(device_t, int, int);
640 static void	wm_gmii_hv_writereg(device_t, int, int, int);
641 static int	wm_gmii_82580_readreg(device_t, int, int);
642 static void	wm_gmii_82580_writereg(device_t, int, int, int);
643 static int	wm_gmii_gs40g_readreg(device_t, int, int);
644 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
645 static void	wm_gmii_statchg(struct ifnet *);
646 static int	wm_kmrn_readreg(struct wm_softc *, int);
647 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
648 /* SGMII */
649 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
650 static int	wm_sgmii_readreg(device_t, int, int);
651 static void	wm_sgmii_writereg(device_t, int, int, int);
652 /* TBI related */
653 static void	wm_tbi_mediainit(struct wm_softc *);
654 static int	wm_tbi_mediachange(struct ifnet *);
655 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
656 static int	wm_check_for_link(struct wm_softc *);
657 static void	wm_tbi_tick(struct wm_softc *);
658 /* SERDES related */
659 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
660 static int	wm_serdes_mediachange(struct ifnet *);
661 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
662 static void	wm_serdes_tick(struct wm_softc *);
663 /* SFP related */
664 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
665 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
666 
667 /*
668  * NVM related.
669  * Microwire, SPI (with or without EERD) and Flash.
670  */
671 /* Misc functions */
672 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
673 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
674 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
675 /* Microwire */
676 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
677 /* SPI */
678 static int	wm_nvm_ready_spi(struct wm_softc *);
679 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
680 /* Reading via EERD */
681 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
682 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
683 /* Flash */
684 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
685     unsigned int *);
686 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
687 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
688 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
689 	uint16_t *);
690 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
691 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
692 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
693 /* iNVM */
694 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
695 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
696 /* Lock, detect NVM type, validate checksum and read */
697 static int	wm_nvm_acquire(struct wm_softc *);
698 static void	wm_nvm_release(struct wm_softc *);
699 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
700 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
701 static int	wm_nvm_validate_checksum(struct wm_softc *);
702 static void	wm_nvm_version_invm(struct wm_softc *);
703 static void	wm_nvm_version(struct wm_softc *);
704 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
705 
706 /*
707  * Hardware semaphores.
708  * Very complex...
709  */
710 static int	wm_get_swsm_semaphore(struct wm_softc *);
711 static void	wm_put_swsm_semaphore(struct wm_softc *);
712 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
713 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
714 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
715 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
716 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
717 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
718 
719 /*
720  * Management mode and power management related subroutines.
721  * BMC, AMT, suspend/resume and EEE.
722  */
723 static int	wm_check_mng_mode(struct wm_softc *);
724 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
725 static int	wm_check_mng_mode_82574(struct wm_softc *);
726 static int	wm_check_mng_mode_generic(struct wm_softc *);
727 static int	wm_enable_mng_pass_thru(struct wm_softc *);
728 static int	wm_check_reset_block(struct wm_softc *);
729 static void	wm_get_hw_control(struct wm_softc *);
730 static void	wm_release_hw_control(struct wm_softc *);
731 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
732 static void	wm_smbustopci(struct wm_softc *);
733 static void	wm_init_manageability(struct wm_softc *);
734 static void	wm_release_manageability(struct wm_softc *);
735 static void	wm_get_wakeup(struct wm_softc *);
736 #ifdef WM_WOL
737 static void	wm_enable_phy_wakeup(struct wm_softc *);
738 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
739 static void	wm_enable_wakeup(struct wm_softc *);
740 #endif
741 /* EEE */
742 static void	wm_set_eee_i350(struct wm_softc *);
743 
744 /*
745  * Workarounds (mainly PHY related).
746  * Basically, the PHY workarounds are implemented in the PHY drivers.
747  */
748 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
749 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
750 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
751 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
752 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
753 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
754 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
755 static void	wm_reset_init_script_82575(struct wm_softc *);
756 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
757 static void	wm_pll_workaround_i210(struct wm_softc *);
758 
759 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
760     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
761 
762 /*
763  * Devices supported by this driver.
764  */
765 static const struct wm_product {
766 	pci_vendor_id_t		wmp_vendor;
767 	pci_product_id_t	wmp_product;
768 	const char		*wmp_name;
769 	wm_chip_type		wmp_type;
770 	uint32_t		wmp_flags;
771 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
772 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
773 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
774 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
775 #define WMP_MEDIATYPE(x)	((x) & 0x03)
776 } wm_products[] = {
777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
778 	  "Intel i82542 1000BASE-X Ethernet",
779 	  WM_T_82542_2_1,	WMP_F_FIBER },
780 
781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
782 	  "Intel i82543GC 1000BASE-X Ethernet",
783 	  WM_T_82543,		WMP_F_FIBER },
784 
785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
786 	  "Intel i82543GC 1000BASE-T Ethernet",
787 	  WM_T_82543,		WMP_F_COPPER },
788 
789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
790 	  "Intel i82544EI 1000BASE-T Ethernet",
791 	  WM_T_82544,		WMP_F_COPPER },
792 
793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
794 	  "Intel i82544EI 1000BASE-X Ethernet",
795 	  WM_T_82544,		WMP_F_FIBER },
796 
797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
798 	  "Intel i82544GC 1000BASE-T Ethernet",
799 	  WM_T_82544,		WMP_F_COPPER },
800 
801 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
802 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
803 	  WM_T_82544,		WMP_F_COPPER },
804 
805 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
806 	  "Intel i82540EM 1000BASE-T Ethernet",
807 	  WM_T_82540,		WMP_F_COPPER },
808 
809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
810 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
811 	  WM_T_82540,		WMP_F_COPPER },
812 
813 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
814 	  "Intel i82540EP 1000BASE-T Ethernet",
815 	  WM_T_82540,		WMP_F_COPPER },
816 
817 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
818 	  "Intel i82540EP 1000BASE-T Ethernet",
819 	  WM_T_82540,		WMP_F_COPPER },
820 
821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
822 	  "Intel i82540EP 1000BASE-T Ethernet",
823 	  WM_T_82540,		WMP_F_COPPER },
824 
825 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
826 	  "Intel i82545EM 1000BASE-T Ethernet",
827 	  WM_T_82545,		WMP_F_COPPER },
828 
829 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
830 	  "Intel i82545GM 1000BASE-T Ethernet",
831 	  WM_T_82545_3,		WMP_F_COPPER },
832 
833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
834 	  "Intel i82545GM 1000BASE-X Ethernet",
835 	  WM_T_82545_3,		WMP_F_FIBER },
836 
837 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
838 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
839 	  WM_T_82545_3,		WMP_F_SERDES },
840 
841 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
842 	  "Intel i82546EB 1000BASE-T Ethernet",
843 	  WM_T_82546,		WMP_F_COPPER },
844 
845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
846 	  "Intel i82546EB 1000BASE-T Ethernet",
847 	  WM_T_82546,		WMP_F_COPPER },
848 
849 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
850 	  "Intel i82545EM 1000BASE-X Ethernet",
851 	  WM_T_82545,		WMP_F_FIBER },
852 
853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
854 	  "Intel i82546EB 1000BASE-X Ethernet",
855 	  WM_T_82546,		WMP_F_FIBER },
856 
857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
858 	  "Intel i82546GB 1000BASE-T Ethernet",
859 	  WM_T_82546_3,		WMP_F_COPPER },
860 
861 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
862 	  "Intel i82546GB 1000BASE-X Ethernet",
863 	  WM_T_82546_3,		WMP_F_FIBER },
864 
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
866 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
867 	  WM_T_82546_3,		WMP_F_SERDES },
868 
869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
870 	  "i82546GB quad-port Gigabit Ethernet",
871 	  WM_T_82546_3,		WMP_F_COPPER },
872 
873 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
874 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
875 	  WM_T_82546_3,		WMP_F_COPPER },
876 
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
878 	  "Intel PRO/1000MT (82546GB)",
879 	  WM_T_82546_3,		WMP_F_COPPER },
880 
881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
882 	  "Intel i82541EI 1000BASE-T Ethernet",
883 	  WM_T_82541,		WMP_F_COPPER },
884 
885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
886 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
887 	  WM_T_82541,		WMP_F_COPPER },
888 
889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
890 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
891 	  WM_T_82541,		WMP_F_COPPER },
892 
893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
894 	  "Intel i82541ER 1000BASE-T Ethernet",
895 	  WM_T_82541_2,		WMP_F_COPPER },
896 
897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
898 	  "Intel i82541GI 1000BASE-T Ethernet",
899 	  WM_T_82541_2,		WMP_F_COPPER },
900 
901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
902 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
903 	  WM_T_82541_2,		WMP_F_COPPER },
904 
905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
906 	  "Intel i82541PI 1000BASE-T Ethernet",
907 	  WM_T_82541_2,		WMP_F_COPPER },
908 
909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
910 	  "Intel i82547EI 1000BASE-T Ethernet",
911 	  WM_T_82547,		WMP_F_COPPER },
912 
913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
914 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
915 	  WM_T_82547,		WMP_F_COPPER },
916 
917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
918 	  "Intel i82547GI 1000BASE-T Ethernet",
919 	  WM_T_82547_2,		WMP_F_COPPER },
920 
921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
922 	  "Intel PRO/1000 PT (82571EB)",
923 	  WM_T_82571,		WMP_F_COPPER },
924 
925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
926 	  "Intel PRO/1000 PF (82571EB)",
927 	  WM_T_82571,		WMP_F_FIBER },
928 
929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
930 	  "Intel PRO/1000 PB (82571EB)",
931 	  WM_T_82571,		WMP_F_SERDES },
932 
933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
934 	  "Intel PRO/1000 QT (82571EB)",
935 	  WM_T_82571,		WMP_F_COPPER },
936 
937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
938 	  "Intel PRO/1000 PT Quad Port Server Adapter",
939 	  WM_T_82571,		WMP_F_COPPER, },
940 
941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
942 	  "Intel Gigabit PT Quad Port Server ExpressModule",
943 	  WM_T_82571,		WMP_F_COPPER, },
944 
945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
946 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
947 	  WM_T_82571,		WMP_F_SERDES, },
948 
949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
950 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
951 	  WM_T_82571,		WMP_F_SERDES, },
952 
953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
954 	  "Intel 82571EB Quad 1000baseX Ethernet",
955 	  WM_T_82571,		WMP_F_FIBER, },
956 
957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
958 	  "Intel i82572EI 1000baseT Ethernet",
959 	  WM_T_82572,		WMP_F_COPPER },
960 
961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
962 	  "Intel i82572EI 1000baseX Ethernet",
963 	  WM_T_82572,		WMP_F_FIBER },
964 
965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
966 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
967 	  WM_T_82572,		WMP_F_SERDES },
968 
969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
970 	  "Intel i82572EI 1000baseT Ethernet",
971 	  WM_T_82572,		WMP_F_COPPER },
972 
973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
974 	  "Intel i82573E",
975 	  WM_T_82573,		WMP_F_COPPER },
976 
977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
978 	  "Intel i82573E IAMT",
979 	  WM_T_82573,		WMP_F_COPPER },
980 
981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
982 	  "Intel i82573L Gigabit Ethernet",
983 	  WM_T_82573,		WMP_F_COPPER },
984 
985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
986 	  "Intel i82574L",
987 	  WM_T_82574,		WMP_F_COPPER },
988 
989 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
990 	  "Intel i82574L",
991 	  WM_T_82574,		WMP_F_COPPER },
992 
993 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
994 	  "Intel i82583V",
995 	  WM_T_82583,		WMP_F_COPPER },
996 
997 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
998 	  "i80003 dual 1000baseT Ethernet",
999 	  WM_T_80003,		WMP_F_COPPER },
1000 
1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1002 	  "i80003 dual 1000baseX Ethernet",
1003 	  WM_T_80003,		WMP_F_COPPER },
1004 
1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1006 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1007 	  WM_T_80003,		WMP_F_SERDES },
1008 
1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1010 	  "Intel i80003 1000baseT Ethernet",
1011 	  WM_T_80003,		WMP_F_COPPER },
1012 
1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1014 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1015 	  WM_T_80003,		WMP_F_SERDES },
1016 
1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1018 	  "Intel i82801H (M_AMT) LAN Controller",
1019 	  WM_T_ICH8,		WMP_F_COPPER },
1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1021 	  "Intel i82801H (AMT) LAN Controller",
1022 	  WM_T_ICH8,		WMP_F_COPPER },
1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1024 	  "Intel i82801H LAN Controller",
1025 	  WM_T_ICH8,		WMP_F_COPPER },
1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1027 	  "Intel i82801H (IFE) LAN Controller",
1028 	  WM_T_ICH8,		WMP_F_COPPER },
1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1030 	  "Intel i82801H (M) LAN Controller",
1031 	  WM_T_ICH8,		WMP_F_COPPER },
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1033 	  "Intel i82801H IFE (GT) LAN Controller",
1034 	  WM_T_ICH8,		WMP_F_COPPER },
1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1036 	  "Intel i82801H IFE (G) LAN Controller",
1037 	  WM_T_ICH8,		WMP_F_COPPER },
1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1039 	  "82801I (AMT) LAN Controller",
1040 	  WM_T_ICH9,		WMP_F_COPPER },
1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1042 	  "82801I LAN Controller",
1043 	  WM_T_ICH9,		WMP_F_COPPER },
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1045 	  "82801I (G) LAN Controller",
1046 	  WM_T_ICH9,		WMP_F_COPPER },
1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1048 	  "82801I (GT) LAN Controller",
1049 	  WM_T_ICH9,		WMP_F_COPPER },
1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1051 	  "82801I (C) LAN Controller",
1052 	  WM_T_ICH9,		WMP_F_COPPER },
1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1054 	  "82801I mobile LAN Controller",
1055 	  WM_T_ICH9,		WMP_F_COPPER },
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1057 	  "82801I mobile (V) LAN Controller",
1058 	  WM_T_ICH9,		WMP_F_COPPER },
1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1060 	  "82801I mobile (AMT) LAN Controller",
1061 	  WM_T_ICH9,		WMP_F_COPPER },
1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1063 	  "82567LM-4 LAN Controller",
1064 	  WM_T_ICH9,		WMP_F_COPPER },
1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
1066 	  "82567V-3 LAN Controller",
1067 	  WM_T_ICH9,		WMP_F_COPPER },
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1069 	  "82567LM-2 LAN Controller",
1070 	  WM_T_ICH10,		WMP_F_COPPER },
1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1072 	  "82567LF-2 LAN Controller",
1073 	  WM_T_ICH10,		WMP_F_COPPER },
1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1075 	  "82567LM-3 LAN Controller",
1076 	  WM_T_ICH10,		WMP_F_COPPER },
1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1078 	  "82567LF-3 LAN Controller",
1079 	  WM_T_ICH10,		WMP_F_COPPER },
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1081 	  "82567V-2 LAN Controller",
1082 	  WM_T_ICH10,		WMP_F_COPPER },
1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1084 	  "82567V-3? LAN Controller",
1085 	  WM_T_ICH10,		WMP_F_COPPER },
1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1087 	  "HANKSVILLE LAN Controller",
1088 	  WM_T_ICH10,		WMP_F_COPPER },
1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1090 	  "PCH LAN (82577LM) Controller",
1091 	  WM_T_PCH,		WMP_F_COPPER },
1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1093 	  "PCH LAN (82577LC) Controller",
1094 	  WM_T_PCH,		WMP_F_COPPER },
1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1096 	  "PCH LAN (82578DM) Controller",
1097 	  WM_T_PCH,		WMP_F_COPPER },
1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1099 	  "PCH LAN (82578DC) Controller",
1100 	  WM_T_PCH,		WMP_F_COPPER },
1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1102 	  "PCH2 LAN (82579LM) Controller",
1103 	  WM_T_PCH2,		WMP_F_COPPER },
1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1105 	  "PCH2 LAN (82579V) Controller",
1106 	  WM_T_PCH2,		WMP_F_COPPER },
1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1108 	  "82575EB dual-1000baseT Ethernet",
1109 	  WM_T_82575,		WMP_F_COPPER },
1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1111 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1112 	  WM_T_82575,		WMP_F_SERDES },
1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1114 	  "82575GB quad-1000baseT Ethernet",
1115 	  WM_T_82575,		WMP_F_COPPER },
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1117 	  "82575GB quad-1000baseT Ethernet (PM)",
1118 	  WM_T_82575,		WMP_F_COPPER },
1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1120 	  "82576 1000BaseT Ethernet",
1121 	  WM_T_82576,		WMP_F_COPPER },
1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1123 	  "82576 1000BaseX Ethernet",
1124 	  WM_T_82576,		WMP_F_FIBER },
1125 
1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1127 	  "82576 gigabit Ethernet (SERDES)",
1128 	  WM_T_82576,		WMP_F_SERDES },
1129 
1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1131 	  "82576 quad-1000BaseT Ethernet",
1132 	  WM_T_82576,		WMP_F_COPPER },
1133 
1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1135 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1136 	  WM_T_82576,		WMP_F_COPPER },
1137 
1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1139 	  "82576 gigabit Ethernet",
1140 	  WM_T_82576,		WMP_F_COPPER },
1141 
1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1143 	  "82576 gigabit Ethernet (SERDES)",
1144 	  WM_T_82576,		WMP_F_SERDES },
1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1146 	  "82576 quad-gigabit Ethernet (SERDES)",
1147 	  WM_T_82576,		WMP_F_SERDES },
1148 
1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1150 	  "82580 1000BaseT Ethernet",
1151 	  WM_T_82580,		WMP_F_COPPER },
1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1153 	  "82580 1000BaseX Ethernet",
1154 	  WM_T_82580,		WMP_F_FIBER },
1155 
1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1157 	  "82580 1000BaseT Ethernet (SERDES)",
1158 	  WM_T_82580,		WMP_F_SERDES },
1159 
1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1161 	  "82580 gigabit Ethernet (SGMII)",
1162 	  WM_T_82580,		WMP_F_COPPER },
1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1164 	  "82580 dual-1000BaseT Ethernet",
1165 	  WM_T_82580,		WMP_F_COPPER },
1166 
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1168 	  "82580 quad-1000BaseX Ethernet",
1169 	  WM_T_82580,		WMP_F_FIBER },
1170 
1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1172 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1173 	  WM_T_82580,		WMP_F_COPPER },
1174 
1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1176 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1177 	  WM_T_82580,		WMP_F_SERDES },
1178 
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1180 	  "DH89XXCC 1000BASE-KX Ethernet",
1181 	  WM_T_82580,		WMP_F_SERDES },
1182 
1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1184 	  "DH89XXCC Gigabit Ethernet (SFP)",
1185 	  WM_T_82580,		WMP_F_SERDES },
1186 
1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1188 	  "I350 Gigabit Network Connection",
1189 	  WM_T_I350,		WMP_F_COPPER },
1190 
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1192 	  "I350 Gigabit Fiber Network Connection",
1193 	  WM_T_I350,		WMP_F_FIBER },
1194 
1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1196 	  "I350 Gigabit Backplane Connection",
1197 	  WM_T_I350,		WMP_F_SERDES },
1198 
1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1200 	  "I350 Quad Port Gigabit Ethernet",
1201 	  WM_T_I350,		WMP_F_SERDES },
1202 
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1204 	  "I350 Gigabit Connection",
1205 	  WM_T_I350,		WMP_F_COPPER },
1206 
1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1208 	  "I354 Gigabit Ethernet (KX)",
1209 	  WM_T_I354,		WMP_F_SERDES },
1210 
1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1212 	  "I354 Gigabit Ethernet (SGMII)",
1213 	  WM_T_I354,		WMP_F_COPPER },
1214 
1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1216 	  "I354 Gigabit Ethernet (2.5G)",
1217 	  WM_T_I354,		WMP_F_COPPER },
1218 
1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1220 	  "I210-T1 Ethernet Server Adapter",
1221 	  WM_T_I210,		WMP_F_COPPER },
1222 
1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1224 	  "I210 Ethernet (Copper OEM)",
1225 	  WM_T_I210,		WMP_F_COPPER },
1226 
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1228 	  "I210 Ethernet (Copper IT)",
1229 	  WM_T_I210,		WMP_F_COPPER },
1230 
1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1232 	  "I210 Ethernet (FLASH less)",
1233 	  WM_T_I210,		WMP_F_COPPER },
1234 
1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1236 	  "I210 Gigabit Ethernet (Fiber)",
1237 	  WM_T_I210,		WMP_F_FIBER },
1238 
1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1240 	  "I210 Gigabit Ethernet (SERDES)",
1241 	  WM_T_I210,		WMP_F_SERDES },
1242 
1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1244 	  "I210 Gigabit Ethernet (FLASH less)",
1245 	  WM_T_I210,		WMP_F_SERDES },
1246 
1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1248 	  "I210 Gigabit Ethernet (SGMII)",
1249 	  WM_T_I210,		WMP_F_COPPER },
1250 
1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1252 	  "I211 Ethernet (COPPER)",
1253 	  WM_T_I211,		WMP_F_COPPER },
1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1255 	  "I217 V Ethernet Connection",
1256 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1258 	  "I217 LM Ethernet Connection",
1259 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1261 	  "I218 V Ethernet Connection",
1262 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1264 	  "I218 V Ethernet Connection",
1265 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1267 	  "I218 V Ethernet Connection",
1268 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1270 	  "I218 LM Ethernet Connection",
1271 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1273 	  "I218 LM Ethernet Connection",
1274 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1276 	  "I218 LM Ethernet Connection",
1277 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1278 	{ 0,			0,
1279 	  NULL,
1280 	  0,			0 },
1281 };
1282 
1283 #ifdef WM_EVENT_COUNTERS
1284 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1285 #endif /* WM_EVENT_COUNTERS */
1286 
1287 
1288 /*
1289  * Register read/write functions other than CSR_{READ|WRITE}().
1291  */
1292 
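/*
 * These use the chip's I/O-space register window: the target register
 * offset is written at BAR offset 0 (IOADDR) and the data is then read
 * or written at offset 4 (IODATA).
 */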
1293 #if 0 /* Not currently used */
1294 static inline uint32_t
1295 wm_io_read(struct wm_softc *sc, int reg)
1296 {
1297 
1298 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1299 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1300 }
1301 #endif
1302 
1303 static inline void
1304 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1305 {
1306 
1307 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1308 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1309 }
1310 
1311 static inline void
1312 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1313     uint32_t data)
1314 {
1315 	uint32_t regval;
1316 	int i;
1317 
1318 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1319 
1320 	CSR_WRITE(sc, reg, regval);
1321 
1322 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1323 		delay(5);
1324 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1325 			break;
1326 	}
1327 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1328 		aprint_error("%s: WARNING:"
1329 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1330 		    device_xname(sc->sc_dev), reg);
1331 	}
1332 }
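
/*
 * Usage sketch (illustrative, not compiled in): writes one byte at
 * offset "off" within an 8-bit register bank such as SCTL, then polls
 * for SCTL_CTL_READY.  The names "bank", "off" and "val" below are
 * hypothetical.
 */
#if 0
	wm_82575_write_8bit_ctlr_reg(sc, bank, off, val);
#endif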
1333 
1334 static inline void
1335 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1336 {
1337 	wa->wa_low = htole32(v & 0xffffffffU);
1338 	if (sizeof(bus_addr_t) == 8)
1339 		wa->wa_high = htole32((uint64_t) v >> 32);
1340 	else
1341 		wa->wa_high = 0;
1342 }
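
/*
 * Example: with a 64-bit bus_addr_t, v == 0x123456789 stores wa_low ==
 * htole32(0x23456789) and wa_high == htole32(0x1).  Descriptors are
 * little-endian regardless of host byte order, hence the htole32().
 */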
1343 
1344 /*
1345  * Descriptor sync/init functions.
1346  */
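/*
 * wm_cdtxsync() syncs "num" Tx descriptors starting at "start", issuing
 * two bus_dmamap_sync() calls when the range wraps past the end of the
 * ring.  Example: on a 4096-entry ring, (start == 4090, num == 10)
 * syncs 4090..4095 and then 0..3.
 */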
1347 static inline void
1348 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1349 {
1350 	struct wm_softc *sc = txq->txq_sc;
1351 
1352 	/* If it will wrap around, sync to the end of the ring. */
1353 	if ((start + num) > WM_NTXDESC(txq)) {
1354 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1355 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
1356 		    (WM_NTXDESC(txq) - start), ops);
1357 		num -= (WM_NTXDESC(txq) - start);
1358 		start = 0;
1359 	}
1360 
1361 	/* Now sync whatever is left. */
1362 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1363 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
1364 }
1365 
1366 static inline void
1367 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1368 {
1369 	struct wm_softc *sc = rxq->rxq_sc;
1370 
1371 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1372 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1373 }
1374 
1375 static inline void
1376 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1377 {
1378 	struct wm_softc *sc = rxq->rxq_sc;
1379 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1380 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1381 	struct mbuf *m = rxs->rxs_mbuf;
1382 
1383 	/*
1384 	 * Note: We scoot the packet forward 2 bytes in the buffer
1385 	 * so that the payload after the Ethernet header is aligned
1386 	 * to a 4-byte boundary.
1387 
1388 	 * XXX BRAINDAMAGE ALERT!
1389 	 * The stupid chip uses the same size for every buffer, which
1390 	 * is set in the Receive Control register.  We are using the 2K
1391 	 * size option, but what we REALLY want is (2K - 2)!  For this
1392 	 * reason, we can't "scoot" packets longer than the standard
1393 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1394 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1395 	 * the upper layer copy the headers.
1396 	 */
1397 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1398 
1399 	wm_set_dma_addr(&rxd->wrx_addr,
1400 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1401 	rxd->wrx_len = 0;
1402 	rxd->wrx_cksum = 0;
1403 	rxd->wrx_status = 0;
1404 	rxd->wrx_errors = 0;
1405 	rxd->wrx_special = 0;
1406 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1407 
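	/* Move the Rx tail pointer: this hands the descriptor to the chip. */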
1408 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1409 }
1410 
1411 /*
1412  * Device driver interface functions and commonly used functions.
1413  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1414  */
1415 
1416 /* Lookup supported device table */
1417 static const struct wm_product *
1418 wm_lookup(const struct pci_attach_args *pa)
1419 {
1420 	const struct wm_product *wmp;
1421 
1422 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1423 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1424 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1425 			return wmp;
1426 	}
1427 	return NULL;
1428 }
1429 
1430 /* The match function (ca_match) */
1431 static int
1432 wm_match(device_t parent, cfdata_t cf, void *aux)
1433 {
1434 	struct pci_attach_args *pa = aux;
1435 
1436 	if (wm_lookup(pa) != NULL)
1437 		return 1;
1438 
1439 	return 0;
1440 }
1441 
1442 /* The attach function (ca_attach) */
1443 static void
1444 wm_attach(device_t parent, device_t self, void *aux)
1445 {
1446 	struct wm_softc *sc = device_private(self);
1447 	struct pci_attach_args *pa = aux;
1448 	prop_dictionary_t dict;
1449 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1450 	pci_chipset_tag_t pc = pa->pa_pc;
1451 	int counts[PCI_INTR_TYPE_SIZE];
1452 	pci_intr_type_t max_type;
1453 	const char *eetype, *xname;
1454 	bus_space_tag_t memt;
1455 	bus_space_handle_t memh;
1456 	bus_size_t memsize;
1457 	int memh_valid;
1458 	int i, error;
1459 	const struct wm_product *wmp;
1460 	prop_data_t ea;
1461 	prop_number_t pn;
1462 	uint8_t enaddr[ETHER_ADDR_LEN];
1463 	uint16_t cfg1, cfg2, swdpin, nvmword;
1464 	pcireg_t preg, memtype;
1465 	uint16_t eeprom_data, apme_mask;
1466 	bool force_clear_smbi;
1467 	uint32_t link_mode;
1468 	uint32_t reg;
1469 
1470 	sc->sc_dev = self;
1471 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1472 	sc->sc_stopping = false;
1473 
1474 	wmp = wm_lookup(pa);
1475 #ifdef DIAGNOSTIC
1476 	if (wmp == NULL) {
1477 		printf("\n");
1478 		panic("wm_attach: impossible");
1479 	}
1480 #endif
1481 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1482 
1483 	sc->sc_pc = pa->pa_pc;
1484 	sc->sc_pcitag = pa->pa_tag;
1485 
1486 	if (pci_dma64_available(pa))
1487 		sc->sc_dmat = pa->pa_dmat64;
1488 	else
1489 		sc->sc_dmat = pa->pa_dmat;
1490 
1491 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1492 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1493 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1494 
1495 	sc->sc_type = wmp->wmp_type;
1496 	if (sc->sc_type < WM_T_82543) {
1497 		if (sc->sc_rev < 2) {
1498 			aprint_error_dev(sc->sc_dev,
1499 			    "i82542 must be at least rev. 2\n");
1500 			return;
1501 		}
1502 		if (sc->sc_rev < 3)
1503 			sc->sc_type = WM_T_82542_2_0;
1504 	}
1505 
1506 	/*
1507 	 * Disable MSI for Errata:
1508 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1509 	 *
1510 	 *  82544: Errata 25
1511 	 *  82540: Errata  6 (easy to reproduce device timeout)
1512 	 *  82545: Errata  4 (easy to reproduce device timeout)
1513 	 *  82546: Errata 26 (easy to reproduce device timeout)
1514 	 *  82541: Errata  7 (easy to reproduce device timeout)
1515 	 *
1516 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1517 	 *
1518 	 *  82571 & 82572: Errata 63
1519 	 */
1520 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1521 	    || (sc->sc_type == WM_T_82572))
1522 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1523 
1524 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1525 	    || (sc->sc_type == WM_T_82580)
1526 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1527 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1528 		sc->sc_flags |= WM_F_NEWQUEUE;
1529 
1530 	/* Set device properties (mactype) */
1531 	dict = device_properties(sc->sc_dev);
1532 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1533 
1534 	/*
1535 	 * Map the device.  All devices support memory-mapped acccess,
1536 	 * and it is really required for normal operation.
1537 	 */
1538 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1539 	switch (memtype) {
1540 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1541 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1542 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1543 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1544 		break;
1545 	default:
1546 		memh_valid = 0;
1547 		break;
1548 	}
1549 
1550 	if (memh_valid) {
1551 		sc->sc_st = memt;
1552 		sc->sc_sh = memh;
1553 		sc->sc_ss = memsize;
1554 	} else {
1555 		aprint_error_dev(sc->sc_dev,
1556 		    "unable to map device registers\n");
1557 		return;
1558 	}
1559 
1560 	/*
1561 	 * In addition, i82544 and later support I/O mapped indirect
1562 	 * register access.  It is not desirable (nor supported in
1563 	 * this driver) to use it for normal operation, though it is
1564 	 * required to work around bugs in some chip versions.
1565 	 */
1566 	if (sc->sc_type >= WM_T_82544) {
1567 		/* First we have to find the I/O BAR. */
1568 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1569 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1570 			if (memtype == PCI_MAPREG_TYPE_IO)
1571 				break;
1572 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1573 			    PCI_MAPREG_MEM_TYPE_64BIT)
1574 				i += 4;	/* skip high bits, too */
1575 		}
1576 		if (i < PCI_MAPREG_END) {
1577 			/*
1578 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1579 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO
1580 			 * BAR. That's not a problem, because those newer
1581 			 * chips don't have this bug.
1582 			 *
1583 			 * The i8254x apparently doesn't respond when the
1584 			 * I/O BAR is 0, which looks somewhat like it hasn't
1585 			 * been configured.
1586 			 */
1587 			preg = pci_conf_read(pc, pa->pa_tag, i);
1588 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1589 				aprint_error_dev(sc->sc_dev,
1590 				    "WARNING: I/O BAR at zero.\n");
1591 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1592 					0, &sc->sc_iot, &sc->sc_ioh,
1593 					NULL, &sc->sc_ios) == 0) {
1594 				sc->sc_flags |= WM_F_IOH_VALID;
1595 			} else {
1596 				aprint_error_dev(sc->sc_dev,
1597 				    "WARNING: unable to map I/O space\n");
1598 			}
1599 		}
1600 
1601 	}
1602 
1603 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1604 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1605 	preg |= PCI_COMMAND_MASTER_ENABLE;
1606 	if (sc->sc_type < WM_T_82542_2_1)
1607 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1608 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1609 
1610 	/* power up chip */
1611 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1612 	    NULL)) && error != EOPNOTSUPP) {
1613 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1614 		return;
1615 	}
1616 
1617 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1618 	error = wm_alloc_txrx_queues(sc);
1619 	if (error) {
1620 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
1621 		    error);
1622 		return;
1623 	}
1624 
1625 	/* Allocation settings */
1626 	max_type = PCI_INTR_TYPE_MSIX;
1627 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
1628 	counts[PCI_INTR_TYPE_MSI] = 1;
1629 	counts[PCI_INTR_TYPE_INTX] = 1;
1630 
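	/* Try MSI-X first; if that fails, fall back to MSI, then INTx. */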
1631 alloc_retry:
1632 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1633 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1634 		return;
1635 	}
1636 
1637 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1638 		error = wm_setup_msix(sc);
1639 		if (error) {
1640 			pci_intr_release(pc, sc->sc_intrs,
1641 			    counts[PCI_INTR_TYPE_MSIX]);
1642 
1643 			/* Setup for MSI: Disable MSI-X */
1644 			max_type = PCI_INTR_TYPE_MSI;
1645 			counts[PCI_INTR_TYPE_MSI] = 1;
1646 			counts[PCI_INTR_TYPE_INTX] = 1;
1647 			goto alloc_retry;
1648 		}
1649 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1650 		error = wm_setup_legacy(sc);
1651 		if (error) {
1652 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1653 			    counts[PCI_INTR_TYPE_MSI]);
1654 
1655 			/* The next try is for INTx: Disable MSI */
1656 			max_type = PCI_INTR_TYPE_INTX;
1657 			counts[PCI_INTR_TYPE_INTX] = 1;
1658 			goto alloc_retry;
1659 		}
1660 	} else {
1661 		error = wm_setup_legacy(sc);
1662 		if (error) {
1663 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1664 			    counts[PCI_INTR_TYPE_INTX]);
1665 			return;
1666 		}
1667 	}
1668 
1669 	/*
1670 	 * Check the function ID (unit number of the chip).
1671 	 */
1672 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1673 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1674 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1675 	    || (sc->sc_type == WM_T_82580)
1676 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1677 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1678 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1679 	else
1680 		sc->sc_funcid = 0;
1681 
1682 	/*
1683 	 * Determine a few things about the bus we're connected to.
1684 	 */
1685 	if (sc->sc_type < WM_T_82543) {
1686 		/* We don't really know the bus characteristics here. */
1687 		sc->sc_bus_speed = 33;
1688 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1689 		/*
1690 		 * CSA (Communication Streaming Architecture) is about as fast
1691 		 * as a 32-bit 66MHz PCI bus.
1692 		 */
1693 		sc->sc_flags |= WM_F_CSA;
1694 		sc->sc_bus_speed = 66;
1695 		aprint_verbose_dev(sc->sc_dev,
1696 		    "Communication Streaming Architecture\n");
1697 		if (sc->sc_type == WM_T_82547) {
1698 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1699 			callout_setfunc(&sc->sc_txfifo_ch,
1700 					wm_82547_txfifo_stall, sc);
1701 			aprint_verbose_dev(sc->sc_dev,
1702 			    "using 82547 Tx FIFO stall work-around\n");
1703 		}
1704 	} else if (sc->sc_type >= WM_T_82571) {
1705 		sc->sc_flags |= WM_F_PCIE;
1706 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1707 		    && (sc->sc_type != WM_T_ICH10)
1708 		    && (sc->sc_type != WM_T_PCH)
1709 		    && (sc->sc_type != WM_T_PCH2)
1710 		    && (sc->sc_type != WM_T_PCH_LPT)) {
1711 			/* ICH* and PCH* have no PCIe capability registers */
1712 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1713 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1714 				NULL) == 0)
1715 				aprint_error_dev(sc->sc_dev,
1716 				    "unable to find PCIe capability\n");
1717 		}
1718 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1719 	} else {
1720 		reg = CSR_READ(sc, WMREG_STATUS);
1721 		if (reg & STATUS_BUS64)
1722 			sc->sc_flags |= WM_F_BUS64;
1723 		if ((reg & STATUS_PCIX_MODE) != 0) {
1724 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1725 
1726 			sc->sc_flags |= WM_F_PCIX;
1727 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1728 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1729 				aprint_error_dev(sc->sc_dev,
1730 				    "unable to find PCIX capability\n");
1731 			else if (sc->sc_type != WM_T_82545_3 &&
1732 				 sc->sc_type != WM_T_82546_3) {
1733 				/*
1734 				 * Work around a problem caused by the BIOS
1735 				 * setting the max memory read byte count
1736 				 * incorrectly.
1737 				 */
1738 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1739 				    sc->sc_pcixe_capoff + PCIX_CMD);
1740 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1741 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1742 
1743 				bytecnt =
1744 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1745 				    PCIX_CMD_BYTECNT_SHIFT;
1746 				maxb =
1747 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1748 				    PCIX_STATUS_MAXB_SHIFT;
1749 				if (bytecnt > maxb) {
1750 					aprint_verbose_dev(sc->sc_dev,
1751 					    "resetting PCI-X MMRBC: %d -> %d\n",
1752 					    512 << bytecnt, 512 << maxb);
1753 					pcix_cmd = (pcix_cmd &
1754 					    ~PCIX_CMD_BYTECNT_MASK) |
1755 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1756 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1757 					    sc->sc_pcixe_capoff + PCIX_CMD,
1758 					    pcix_cmd);
1759 				}
1760 			}
1761 		}
1762 		/*
1763 		 * The quad port adapter is special; it has a PCIX-PCIX
1764 		 * bridge on the board, and can run the secondary bus at
1765 		 * a higher speed.
1766 		 */
1767 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1768 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1769 								      : 66;
1770 		} else if (sc->sc_flags & WM_F_PCIX) {
1771 			switch (reg & STATUS_PCIXSPD_MASK) {
1772 			case STATUS_PCIXSPD_50_66:
1773 				sc->sc_bus_speed = 66;
1774 				break;
1775 			case STATUS_PCIXSPD_66_100:
1776 				sc->sc_bus_speed = 100;
1777 				break;
1778 			case STATUS_PCIXSPD_100_133:
1779 				sc->sc_bus_speed = 133;
1780 				break;
1781 			default:
1782 				aprint_error_dev(sc->sc_dev,
1783 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1784 				    reg & STATUS_PCIXSPD_MASK);
1785 				sc->sc_bus_speed = 66;
1786 				break;
1787 			}
1788 		} else
1789 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1790 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1791 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1792 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1793 	}
1794 
1795 	/* clear interesting stat counters */
1796 	CSR_READ(sc, WMREG_COLC);
1797 	CSR_READ(sc, WMREG_RXERRC);
1798 
1799 	/* get PHY control from SMBus to PCIe */
1800 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1801 	    || (sc->sc_type == WM_T_PCH_LPT))
1802 		wm_smbustopci(sc);
1803 
1804 	/* Reset the chip to a known state. */
1805 	wm_reset(sc);
1806 
1807 	/* Get some information about the EEPROM. */
1808 	switch (sc->sc_type) {
1809 	case WM_T_82542_2_0:
1810 	case WM_T_82542_2_1:
1811 	case WM_T_82543:
1812 	case WM_T_82544:
1813 		/* Microwire */
1814 		sc->sc_nvm_wordsize = 64;
1815 		sc->sc_nvm_addrbits = 6;
1816 		break;
1817 	case WM_T_82540:
1818 	case WM_T_82545:
1819 	case WM_T_82545_3:
1820 	case WM_T_82546:
1821 	case WM_T_82546_3:
1822 		/* Microwire */
1823 		reg = CSR_READ(sc, WMREG_EECD);
1824 		if (reg & EECD_EE_SIZE) {
1825 			sc->sc_nvm_wordsize = 256;
1826 			sc->sc_nvm_addrbits = 8;
1827 		} else {
1828 			sc->sc_nvm_wordsize = 64;
1829 			sc->sc_nvm_addrbits = 6;
1830 		}
1831 		sc->sc_flags |= WM_F_LOCK_EECD;
1832 		break;
1833 	case WM_T_82541:
1834 	case WM_T_82541_2:
1835 	case WM_T_82547:
1836 	case WM_T_82547_2:
1837 		sc->sc_flags |= WM_F_LOCK_EECD;
1838 		reg = CSR_READ(sc, WMREG_EECD);
1839 		if (reg & EECD_EE_TYPE) {
1840 			/* SPI */
1841 			sc->sc_flags |= WM_F_EEPROM_SPI;
1842 			wm_nvm_set_addrbits_size_eecd(sc);
1843 		} else {
1844 			/* Microwire */
1845 			if ((reg & EECD_EE_ABITS) != 0) {
1846 				sc->sc_nvm_wordsize = 256;
1847 				sc->sc_nvm_addrbits = 8;
1848 			} else {
1849 				sc->sc_nvm_wordsize = 64;
1850 				sc->sc_nvm_addrbits = 6;
1851 			}
1852 		}
1853 		break;
1854 	case WM_T_82571:
1855 	case WM_T_82572:
1856 		/* SPI */
1857 		sc->sc_flags |= WM_F_EEPROM_SPI;
1858 		wm_nvm_set_addrbits_size_eecd(sc);
1859 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1860 		break;
1861 	case WM_T_82573:
1862 		sc->sc_flags |= WM_F_LOCK_SWSM;
1863 		/* FALLTHROUGH */
1864 	case WM_T_82574:
1865 	case WM_T_82583:
1866 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1867 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1868 			sc->sc_nvm_wordsize = 2048;
1869 		} else {
1870 			/* SPI */
1871 			sc->sc_flags |= WM_F_EEPROM_SPI;
1872 			wm_nvm_set_addrbits_size_eecd(sc);
1873 		}
1874 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1875 		break;
1876 	case WM_T_82575:
1877 	case WM_T_82576:
1878 	case WM_T_82580:
1879 	case WM_T_I350:
1880 	case WM_T_I354:
1881 	case WM_T_80003:
1882 		/* SPI */
1883 		sc->sc_flags |= WM_F_EEPROM_SPI;
1884 		wm_nvm_set_addrbits_size_eecd(sc);
1885 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1886 		    | WM_F_LOCK_SWSM;
1887 		break;
1888 	case WM_T_ICH8:
1889 	case WM_T_ICH9:
1890 	case WM_T_ICH10:
1891 	case WM_T_PCH:
1892 	case WM_T_PCH2:
1893 	case WM_T_PCH_LPT:
1894 		/* FLASH */
1895 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1896 		sc->sc_nvm_wordsize = 2048;
1897 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1898 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1899 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1900 			aprint_error_dev(sc->sc_dev,
1901 			    "can't map FLASH registers\n");
1902 			goto out;
1903 		}
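		/*
		 * GFPREG holds the flash region's base and limit in
		 * units of flash sectors; the two NVM banks split that
		 * region, so convert the per-bank size into 16-bit words.
		 */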
1904 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1905 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1906 						ICH_FLASH_SECTOR_SIZE;
1907 		sc->sc_ich8_flash_bank_size =
1908 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1909 		sc->sc_ich8_flash_bank_size -=
1910 		    (reg & ICH_GFPREG_BASE_MASK);
1911 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1912 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1913 		break;
1914 	case WM_T_I210:
1915 	case WM_T_I211:
1916 		if (wm_nvm_get_flash_presence_i210(sc)) {
1917 			wm_nvm_set_addrbits_size_eecd(sc);
1918 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1919 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1920 		} else {
1921 			sc->sc_nvm_wordsize = INVM_SIZE;
1922 			sc->sc_flags |= WM_F_EEPROM_INVM;
1923 			sc->sc_flags |= WM_F_LOCK_SWFW;
1924 		}
1925 		break;
1926 	default:
1927 		break;
1928 	}
1929 
1930 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
1931 	switch (sc->sc_type) {
1932 	case WM_T_82571:
1933 	case WM_T_82572:
1934 		reg = CSR_READ(sc, WMREG_SWSM2);
1935 		if ((reg & SWSM2_LOCK) == 0) {
1936 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1937 			force_clear_smbi = true;
1938 		} else
1939 			force_clear_smbi = false;
1940 		break;
1941 	case WM_T_82573:
1942 	case WM_T_82574:
1943 	case WM_T_82583:
1944 		force_clear_smbi = true;
1945 		break;
1946 	default:
1947 		force_clear_smbi = false;
1948 		break;
1949 	}
1950 	if (force_clear_smbi) {
1951 		reg = CSR_READ(sc, WMREG_SWSM);
1952 		if ((reg & SWSM_SMBI) != 0)
1953 			aprint_error_dev(sc->sc_dev,
1954 			    "Please update the Bootagent\n");
1955 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1956 	}
1957 
1958 	/*
1959 	 * Defer printing the EEPROM type until after verifying the checksum.
1960 	 * This allows the EEPROM type to be printed correctly in the case
1961 	 * that no EEPROM is attached.
1962 	 */
1963 	/*
1964 	 * Validate the EEPROM checksum. If the checksum fails, flag
1965 	 * this for later, so we can fail future reads from the EEPROM.
1966 	 */
1967 	if (wm_nvm_validate_checksum(sc)) {
1968 		/*
1969 		 * Read it again, because some PCI-e parts fail the
1970 		 * first check due to the link being in a sleep state.
1971 		 */
1972 		if (wm_nvm_validate_checksum(sc))
1973 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1974 	}
1975 
1976 	/* Set device properties (macflags) */
1977 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1978 
1979 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1980 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
1981 	else {
1982 		aprint_verbose_dev(sc->sc_dev, "%u words ",
1983 		    sc->sc_nvm_wordsize);
1984 		if (sc->sc_flags & WM_F_EEPROM_INVM)
1985 			aprint_verbose("iNVM");
1986 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1987 			aprint_verbose("FLASH(HW)");
1988 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1989 			aprint_verbose("FLASH");
1990 		else {
1991 			if (sc->sc_flags & WM_F_EEPROM_SPI)
1992 				eetype = "SPI";
1993 			else
1994 				eetype = "MicroWire";
1995 			aprint_verbose("(%d address bits) %s EEPROM",
1996 			    sc->sc_nvm_addrbits, eetype);
1997 		}
1998 	}
1999 	wm_nvm_version(sc);
2000 	aprint_verbose("\n");
2001 
2002 	/* Check for I21[01] PLL workaround */
2003 	if (sc->sc_type == WM_T_I210)
2004 		sc->sc_flags |= WM_F_PLL_WA_I210;
2005 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2006 		/* NVM image release 3.25 has a workaround */
2007 		if ((sc->sc_nvm_ver_major < 3)
2008 		    || ((sc->sc_nvm_ver_major == 3)
2009 			&& (sc->sc_nvm_ver_minor < 25))) {
2010 			aprint_verbose_dev(sc->sc_dev,
2011 			    "ROM image version %d.%d is older than 3.25\n",
2012 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2013 			sc->sc_flags |= WM_F_PLL_WA_I210;
2014 		}
2015 	}
2016 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2017 		wm_pll_workaround_i210(sc);
2018 
2019 	switch (sc->sc_type) {
2020 	case WM_T_82571:
2021 	case WM_T_82572:
2022 	case WM_T_82573:
2023 	case WM_T_82574:
2024 	case WM_T_82583:
2025 	case WM_T_80003:
2026 	case WM_T_ICH8:
2027 	case WM_T_ICH9:
2028 	case WM_T_ICH10:
2029 	case WM_T_PCH:
2030 	case WM_T_PCH2:
2031 	case WM_T_PCH_LPT:
2032 		if (wm_check_mng_mode(sc) != 0)
2033 			wm_get_hw_control(sc);
2034 		break;
2035 	default:
2036 		break;
2037 	}
2038 	wm_get_wakeup(sc);
2039 	/*
2040 	 * Read the Ethernet address from the EEPROM, unless it was
2041 	 * found first in the device properties.
2042 	 */
2043 	ea = prop_dictionary_get(dict, "mac-address");
2044 	if (ea != NULL) {
2045 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2046 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2047 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2048 	} else {
2049 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2050 			aprint_error_dev(sc->sc_dev,
2051 			    "unable to read Ethernet address\n");
2052 			goto out;
2053 		}
2054 	}
2055 
2056 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2057 	    ether_sprintf(enaddr));
2058 
2059 	/*
2060 	 * Read the config info from the EEPROM, and set up various
2061 	 * bits in the control registers based on their contents.
2062 	 */
2063 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2064 	if (pn != NULL) {
2065 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2066 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2067 	} else {
2068 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2069 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2070 			goto out;
2071 		}
2072 	}
2073 
2074 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2075 	if (pn != NULL) {
2076 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2077 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2078 	} else {
2079 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2080 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2081 			goto out;
2082 		}
2083 	}
2084 
2085 	/* check for WM_F_WOL */
2086 	switch (sc->sc_type) {
2087 	case WM_T_82542_2_0:
2088 	case WM_T_82542_2_1:
2089 	case WM_T_82543:
2090 		/* dummy? */
2091 		eeprom_data = 0;
2092 		apme_mask = NVM_CFG3_APME;
2093 		break;
2094 	case WM_T_82544:
2095 		apme_mask = NVM_CFG2_82544_APM_EN;
2096 		eeprom_data = cfg2;
2097 		break;
2098 	case WM_T_82546:
2099 	case WM_T_82546_3:
2100 	case WM_T_82571:
2101 	case WM_T_82572:
2102 	case WM_T_82573:
2103 	case WM_T_82574:
2104 	case WM_T_82583:
2105 	case WM_T_80003:
2106 	default:
2107 		apme_mask = NVM_CFG3_APME;
2108 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2109 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2110 		break;
2111 	case WM_T_82575:
2112 	case WM_T_82576:
2113 	case WM_T_82580:
2114 	case WM_T_I350:
2115 	case WM_T_I354: /* XXX ok? */
2116 	case WM_T_ICH8:
2117 	case WM_T_ICH9:
2118 	case WM_T_ICH10:
2119 	case WM_T_PCH:
2120 	case WM_T_PCH2:
2121 	case WM_T_PCH_LPT:
2122 		/* XXX The funcid should be checked on some devices */
2123 		apme_mask = WUC_APME;
2124 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2125 		break;
2126 	}
2127 
2128 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2129 	if ((eeprom_data & apme_mask) != 0)
2130 		sc->sc_flags |= WM_F_WOL;
2131 #ifdef WM_DEBUG
2132 	if ((sc->sc_flags & WM_F_WOL) != 0)
2133 		printf("WOL\n");
2134 #endif
2135 
2136 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2137 		/* Check NVM for autonegotiation */
2138 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2139 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2140 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2141 		}
2142 	}
2143 
2144 	/*
2145 	 * XXX need special handling for some multiple-port cards
2146 	 * to disable a particular port.
2147 	 */
2148 
2149 	if (sc->sc_type >= WM_T_82544) {
2150 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2151 		if (pn != NULL) {
2152 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2153 			swdpin = (uint16_t) prop_number_integer_value(pn);
2154 		} else {
2155 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2156 				aprint_error_dev(sc->sc_dev,
2157 				    "unable to read SWDPIN\n");
2158 				goto out;
2159 			}
2160 		}
2161 	}
2162 
2163 	if (cfg1 & NVM_CFG1_ILOS)
2164 		sc->sc_ctrl |= CTRL_ILOS;
2165 
2166 	/*
2167 	 * XXX
2168 	 * This code isn't correct because pins 2 and 3 are located
2169 	 * in different positions on newer chips. Check all datasheets.
2170 	 *
2171 	 * Until this problem is resolved, apply this only on chips up to the 82580.
2172 	 */
2173 	if (sc->sc_type <= WM_T_82580) {
2174 		if (sc->sc_type >= WM_T_82544) {
2175 			sc->sc_ctrl |=
2176 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2177 			    CTRL_SWDPIO_SHIFT;
2178 			sc->sc_ctrl |=
2179 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2180 			    CTRL_SWDPINS_SHIFT;
2181 		} else {
2182 			sc->sc_ctrl |=
2183 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2184 			    CTRL_SWDPIO_SHIFT;
2185 		}
2186 	}
2187 
2188 	/* XXX For other than 82580? */
2189 	if (sc->sc_type == WM_T_82580) {
2190 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2191 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
2192 		if (nvmword & __BIT(13)) {
2193 			printf("SET ILOS\n");
2194 			sc->sc_ctrl |= CTRL_ILOS;
2195 		}
2196 	}
2197 
2198 #if 0
2199 	if (sc->sc_type >= WM_T_82544) {
2200 		if (cfg1 & NVM_CFG1_IPS0)
2201 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2202 		if (cfg1 & NVM_CFG1_IPS1)
2203 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2204 		sc->sc_ctrl_ext |=
2205 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2206 		    CTRL_EXT_SWDPIO_SHIFT;
2207 		sc->sc_ctrl_ext |=
2208 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2209 		    CTRL_EXT_SWDPINS_SHIFT;
2210 	} else {
2211 		sc->sc_ctrl_ext |=
2212 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2213 		    CTRL_EXT_SWDPIO_SHIFT;
2214 	}
2215 #endif
2216 
2217 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2218 #if 0
2219 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2220 #endif
2221 
2222 	if (sc->sc_type == WM_T_PCH) {
2223 		uint16_t val;
2224 
2225 		/* Save the NVM K1 bit setting */
2226 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2227 
2228 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2229 			sc->sc_nvm_k1_enabled = 1;
2230 		else
2231 			sc->sc_nvm_k1_enabled = 0;
2232 	}
2233 
2234 	/*
2235 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2236 	 * media structures accordingly.
2237 	 */
2238 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2239 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2240 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2241 	    || sc->sc_type == WM_T_82573
2242 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2243 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2244 		wm_gmii_mediainit(sc, wmp->wmp_product);
2245 	} else if (sc->sc_type < WM_T_82543 ||
2246 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2247 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2248 			aprint_error_dev(sc->sc_dev,
2249 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2250 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2251 		}
2252 		wm_tbi_mediainit(sc);
2253 	} else {
2254 		switch (sc->sc_type) {
2255 		case WM_T_82575:
2256 		case WM_T_82576:
2257 		case WM_T_82580:
2258 		case WM_T_I350:
2259 		case WM_T_I354:
2260 		case WM_T_I210:
2261 		case WM_T_I211:
2262 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2263 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2264 			switch (link_mode) {
2265 			case CTRL_EXT_LINK_MODE_1000KX:
2266 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2267 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2268 				break;
2269 			case CTRL_EXT_LINK_MODE_SGMII:
2270 				if (wm_sgmii_uses_mdio(sc)) {
2271 					aprint_verbose_dev(sc->sc_dev,
2272 					    "SGMII(MDIO)\n");
2273 					sc->sc_flags |= WM_F_SGMII;
2274 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2275 					break;
2276 				}
2277 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2278 				/*FALLTHROUGH*/
2279 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2280 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2281 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2282 					if (link_mode
2283 					    == CTRL_EXT_LINK_MODE_SGMII) {
2284 						sc->sc_mediatype
2285 						    = WM_MEDIATYPE_COPPER;
2286 						sc->sc_flags |= WM_F_SGMII;
2287 					} else {
2288 						sc->sc_mediatype
2289 						    = WM_MEDIATYPE_SERDES;
2290 						aprint_verbose_dev(sc->sc_dev,
2291 						    "SERDES\n");
2292 					}
2293 					break;
2294 				}
2295 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2296 					aprint_verbose_dev(sc->sc_dev,
2297 					    "SERDES\n");
2298 
2299 				/* Change current link mode setting */
2300 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2301 				switch (sc->sc_mediatype) {
2302 				case WM_MEDIATYPE_COPPER:
2303 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2304 					break;
2305 				case WM_MEDIATYPE_SERDES:
2306 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2307 					break;
2308 				default:
2309 					break;
2310 				}
2311 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2312 				break;
2313 			case CTRL_EXT_LINK_MODE_GMII:
2314 			default:
2315 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2316 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2317 				break;
2318 			}
2319 
2320 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2321 				reg |= CTRL_EXT_I2C_ENA;
2322 			else
2323 				reg &= ~CTRL_EXT_I2C_ENA;
2325 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2326 
2327 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2328 				wm_gmii_mediainit(sc, wmp->wmp_product);
2329 			else
2330 				wm_tbi_mediainit(sc);
2331 			break;
2332 		default:
2333 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2334 				aprint_error_dev(sc->sc_dev,
2335 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2336 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2337 			wm_gmii_mediainit(sc, wmp->wmp_product);
2338 		}
2339 	}
2340 
2341 	ifp = &sc->sc_ethercom.ec_if;
2342 	xname = device_xname(sc->sc_dev);
2343 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2344 	ifp->if_softc = sc;
2345 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2346 	ifp->if_ioctl = wm_ioctl;
2347 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2348 		ifp->if_start = wm_nq_start;
2349 	else
2350 		ifp->if_start = wm_start;
2351 	ifp->if_watchdog = wm_watchdog;
2352 	ifp->if_init = wm_init;
2353 	ifp->if_stop = wm_stop;
2354 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2355 	IFQ_SET_READY(&ifp->if_snd);
2356 
2357 	/* Check for jumbo frame */
2358 	switch (sc->sc_type) {
2359 	case WM_T_82573:
2360 		/* XXX limited to 9234 if ASPM is disabled */
2361 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2362 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2363 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2364 		break;
2365 	case WM_T_82571:
2366 	case WM_T_82572:
2367 	case WM_T_82574:
2368 	case WM_T_82575:
2369 	case WM_T_82576:
2370 	case WM_T_82580:
2371 	case WM_T_I350:
2372 	case WM_T_I354: /* XXX ok? */
2373 	case WM_T_I210:
2374 	case WM_T_I211:
2375 	case WM_T_80003:
2376 	case WM_T_ICH9:
2377 	case WM_T_ICH10:
2378 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2379 	case WM_T_PCH_LPT:
2380 		/* XXX limited to 9234 */
2381 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2382 		break;
2383 	case WM_T_PCH:
2384 		/* XXX limited to 4096 */
2385 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2386 		break;
2387 	case WM_T_82542_2_0:
2388 	case WM_T_82542_2_1:
2389 	case WM_T_82583:
2390 	case WM_T_ICH8:
2391 		/* No support for jumbo frame */
2392 		break;
2393 	default:
2394 		/* ETHER_MAX_LEN_JUMBO */
2395 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2396 		break;
2397 	}
2398 
2399 	/* If we're a i82543 or greater, we can support VLANs. */
2400 	if (sc->sc_type >= WM_T_82543)
2401 		sc->sc_ethercom.ec_capabilities |=
2402 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2403 
2404 	/*
2405 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2406 	 * on i82543 and later.
2407 	 */
2408 	if (sc->sc_type >= WM_T_82543) {
2409 		ifp->if_capabilities |=
2410 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2411 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2412 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2413 		    IFCAP_CSUM_TCPv6_Tx |
2414 		    IFCAP_CSUM_UDPv6_Tx;
2415 	}
2416 
2417 	/*
2418 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2419 	 *
2420 	 *	82541GI (8086:1076) ... no
2421 	 *	82572EI (8086:10b9) ... yes
2422 	 */
2423 	if (sc->sc_type >= WM_T_82571) {
2424 		ifp->if_capabilities |=
2425 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2426 	}
2427 
2428 	/*
2429 	 * If we're a i82544 or greater (except i82547), we can do
2430 	 * TCP segmentation offload.
2431 	 */
2432 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2433 		ifp->if_capabilities |= IFCAP_TSOv4;
2434 	}
2435 
2436 	if (sc->sc_type >= WM_T_82571) {
2437 		ifp->if_capabilities |= IFCAP_TSOv6;
2438 	}
2439 
2440 #ifdef WM_MPSAFE
2441 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2442 #else
2443 	sc->sc_core_lock = NULL;
2444 #endif
2445 
2446 	/* Attach the interface. */
2447 	if_attach(ifp);
2448 	ether_ifattach(ifp, enaddr);
2449 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2450 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2451 			  RND_FLAG_DEFAULT);
2452 
2453 #ifdef WM_EVENT_COUNTERS
2454 	/* Attach event counters. */
2455 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2456 	    NULL, xname, "txsstall");
2457 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2458 	    NULL, xname, "txdstall");
2459 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2460 	    NULL, xname, "txfifo_stall");
2461 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2462 	    NULL, xname, "txdw");
2463 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2464 	    NULL, xname, "txqe");
2465 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2466 	    NULL, xname, "rxintr");
2467 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2468 	    NULL, xname, "linkintr");
2469 
2470 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2471 	    NULL, xname, "rxipsum");
2472 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2473 	    NULL, xname, "rxtusum");
2474 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2475 	    NULL, xname, "txipsum");
2476 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2477 	    NULL, xname, "txtusum");
2478 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2479 	    NULL, xname, "txtusum6");
2480 
2481 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2482 	    NULL, xname, "txtso");
2483 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2484 	    NULL, xname, "txtso6");
2485 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2486 	    NULL, xname, "txtsopain");
2487 
2488 	for (i = 0; i < WM_NTXSEGS; i++) {
2489 		snprintf(wm_txseg_evcnt_names[i],
2490 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2491 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2492 		    NULL, xname, wm_txseg_evcnt_names[i]);
2493 	}
2494 
2495 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2496 	    NULL, xname, "txdrop");
2497 
2498 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2499 	    NULL, xname, "tu");
2500 
2501 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2502 	    NULL, xname, "tx_xoff");
2503 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2504 	    NULL, xname, "tx_xon");
2505 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2506 	    NULL, xname, "rx_xoff");
2507 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2508 	    NULL, xname, "rx_xon");
2509 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2510 	    NULL, xname, "rx_macctl");
2511 #endif /* WM_EVENT_COUNTERS */
2512 
2513 	if (pmf_device_register(self, wm_suspend, wm_resume))
2514 		pmf_class_network_register(self, ifp);
2515 	else
2516 		aprint_error_dev(self, "couldn't establish power handler\n");
2517 
2518 	sc->sc_flags |= WM_F_ATTACHED;
2519  out:
2520 	return;
2521 }
2522 
2523 /* The detach function (ca_detach) */
2524 static int
2525 wm_detach(device_t self, int flags __unused)
2526 {
2527 	struct wm_softc *sc = device_private(self);
2528 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2529 	int i;
2530 #ifndef WM_MPSAFE
2531 	int s;
2532 #endif
2533 
2534 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2535 		return 0;
2536 
2537 #ifndef WM_MPSAFE
2538 	s = splnet();
2539 #endif
2540 	/* Stop the interface. Callouts are stopped in it. */
2541 	wm_stop(ifp, 1);
2542 
2543 #ifndef WM_MPSAFE
2544 	splx(s);
2545 #endif
2546 
2547 	pmf_device_deregister(self);
2548 
2549 	/* Tell the firmware about the release */
2550 	WM_CORE_LOCK(sc);
2551 	wm_release_manageability(sc);
2552 	wm_release_hw_control(sc);
2553 	WM_CORE_UNLOCK(sc);
2554 
2555 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2556 
2557 	/* Delete all remaining media. */
2558 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2559 
2560 	ether_ifdetach(ifp);
2561 	if_detach(ifp);
2562 
2563 
2565 	for (i = 0; i < sc->sc_nrxqueues; i++) {
2566 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2567 		WM_RX_LOCK(rxq);
2568 		wm_rxdrain(rxq);
2569 		WM_RX_UNLOCK(rxq);
2570 	}
2571 	/* Must unlock here */
2572 
2573 	wm_free_txrx_queues(sc);
2574 
2575 	/* Disestablish the interrupt handler */
2576 	for (i = 0; i < sc->sc_nintrs; i++) {
2577 		if (sc->sc_ihs[i] != NULL) {
2578 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2579 			sc->sc_ihs[i] = NULL;
2580 		}
2581 	}
2582 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2583 
2584 	/* Unmap the registers */
2585 	if (sc->sc_ss) {
2586 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2587 		sc->sc_ss = 0;
2588 	}
2589 	if (sc->sc_ios) {
2590 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2591 		sc->sc_ios = 0;
2592 	}
2593 	if (sc->sc_flashs) {
2594 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2595 		sc->sc_flashs = 0;
2596 	}
2597 
2598 	if (sc->sc_core_lock)
2599 		mutex_obj_free(sc->sc_core_lock);
2600 
2601 	return 0;
2602 }
2603 
2604 static bool
2605 wm_suspend(device_t self, const pmf_qual_t *qual)
2606 {
2607 	struct wm_softc *sc = device_private(self);
2608 
2609 	wm_release_manageability(sc);
2610 	wm_release_hw_control(sc);
2611 #ifdef WM_WOL
2612 	wm_enable_wakeup(sc);
2613 #endif
2614 
2615 	return true;
2616 }
2617 
2618 static bool
2619 wm_resume(device_t self, const pmf_qual_t *qual)
2620 {
2621 	struct wm_softc *sc = device_private(self);
2622 
2623 	wm_init_manageability(sc);
2624 
2625 	return true;
2626 }
2627 
2628 /*
2629  * wm_watchdog:		[ifnet interface function]
2630  *
2631  *	Watchdog timer handler.
2632  */
2633 static void
2634 wm_watchdog(struct ifnet *ifp)
2635 {
2636 	struct wm_softc *sc = ifp->if_softc;
2637 	struct wm_txqueue *txq = &sc->sc_txq[0];
2638 
2639 	/*
2640 	 * Since we're using delayed interrupts, sweep up
2641 	 * before we report an error.
2642 	 */
2643 	WM_TX_LOCK(txq);
2644 	wm_txeof(sc);
2645 	WM_TX_UNLOCK(txq);
2646 
2647 	if (txq->txq_free != WM_NTXDESC(txq)) {
2648 #ifdef WM_DEBUG
2649 		int i, j;
2650 		struct wm_txsoft *txs;
2651 #endif
2652 		log(LOG_ERR,
2653 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2654 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2655 		    txq->txq_next);
2656 		ifp->if_oerrors++;
2657 #ifdef WM_DEBUG
2658 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2659 		    i = WM_NEXTTXS(txq, i)) {
2660 			txs = &txq->txq_soft[i];
2661 			printf("txs %d tx %d -> %d\n",
2662 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2663 			for (j = txs->txs_firstdesc; ;
2664 			    j = WM_NEXTTX(txq, j)) {
2665 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2666 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2667 				printf("\t %#08x%08x\n",
2668 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2669 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2670 				if (j == txs->txs_lastdesc)
2671 					break;
2672 			}
2673 		}
2674 #endif
2675 		/* Reset the interface. */
2676 		(void) wm_init(ifp);
2677 	}
2678 
2679 	/* Try to get more packets going. */
2680 	ifp->if_start(ifp);
2681 }
2682 
2683 /*
2684  * wm_tick:
2685  *
2686  *	One second timer, used to check link status, sweep up
2687  *	completed transmit jobs, etc.
2688  */
2689 static void
2690 wm_tick(void *arg)
2691 {
2692 	struct wm_softc *sc = arg;
2693 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2694 #ifndef WM_MPSAFE
2695 	int s;
2696 
2697 	s = splnet();
2698 #endif
2699 
2700 	WM_CORE_LOCK(sc);
2701 
2702 	if (sc->sc_stopping)
2703 		goto out;
2704 
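	/* The MAC's statistics registers clear when read. */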
2705 	if (sc->sc_type >= WM_T_82542_2_1) {
2706 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2707 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2708 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2709 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2710 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2711 	}
2712 
2713 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2714 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2715 	    + CSR_READ(sc, WMREG_CRCERRS)
2716 	    + CSR_READ(sc, WMREG_ALGNERRC)
2717 	    + CSR_READ(sc, WMREG_SYMERRC)
2718 	    + CSR_READ(sc, WMREG_RXERRC)
2719 	    + CSR_READ(sc, WMREG_SEC)
2720 	    + CSR_READ(sc, WMREG_CEXTERR)
2721 	    + CSR_READ(sc, WMREG_RLEC);
2722 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2723 
2724 	if (sc->sc_flags & WM_F_HAS_MII)
2725 		mii_tick(&sc->sc_mii);
2726 	else if ((sc->sc_type >= WM_T_82575)
2727 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2728 		wm_serdes_tick(sc);
2729 	else
2730 		wm_tbi_tick(sc);
2731 
2732 out:
2733 	WM_CORE_UNLOCK(sc);
2734 #ifndef WM_MPSAFE
2735 	splx(s);
2736 #endif
2737 
2738 	if (!sc->sc_stopping)
2739 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2740 }
2741 
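/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes: reprogram the receive
 *	filter and VLAN settings, or return ENETRESET to request a
 *	full reinitialization for changes we can't handle here.
 */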
2742 static int
2743 wm_ifflags_cb(struct ethercom *ec)
2744 {
2745 	struct ifnet *ifp = &ec->ec_if;
2746 	struct wm_softc *sc = ifp->if_softc;
2747 	int change = ifp->if_flags ^ sc->sc_if_flags;
2748 	int rc = 0;
2749 
2750 	WM_CORE_LOCK(sc);
2751 
2752 	if (change != 0)
2753 		sc->sc_if_flags = ifp->if_flags;
2754 
2755 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
2756 		rc = ENETRESET;
2757 		goto out;
2758 	}
2759 
2760 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2761 		wm_set_filter(sc);
2762 
2763 	wm_set_vlan(sc);
2764 
2765 out:
2766 	WM_CORE_UNLOCK(sc);
2767 
2768 	return rc;
2769 }
2770 
2771 /*
2772  * wm_ioctl:		[ifnet interface function]
2773  *
2774  *	Handle control requests from the operator.
2775  */
2776 static int
2777 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2778 {
2779 	struct wm_softc *sc = ifp->if_softc;
2780 	struct ifreq *ifr = (struct ifreq *) data;
2781 	struct ifaddr *ifa = (struct ifaddr *)data;
2782 	struct sockaddr_dl *sdl;
2783 	int s, error;
2784 
2785 #ifndef WM_MPSAFE
2786 	s = splnet();
2787 #endif
2788 	switch (cmd) {
2789 	case SIOCSIFMEDIA:
2790 	case SIOCGIFMEDIA:
2791 		WM_CORE_LOCK(sc);
2792 		/* Flow control requires full-duplex mode. */
2793 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2794 		    (ifr->ifr_media & IFM_FDX) == 0)
2795 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2796 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2797 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2798 				/* We can do both TXPAUSE and RXPAUSE. */
2799 				ifr->ifr_media |=
2800 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2801 			}
2802 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2803 		}
2804 		WM_CORE_UNLOCK(sc);
2805 #ifdef WM_MPSAFE
2806 		s = splnet();
2807 #endif
2808 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2809 #ifdef WM_MPSAFE
2810 		splx(s);
2811 #endif
2812 		break;
2813 	case SIOCINITIFADDR:
2814 		WM_CORE_LOCK(sc);
2815 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2816 			sdl = satosdl(ifp->if_dl->ifa_addr);
2817 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2818 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2819 			/* unicast address is first multicast entry */
2820 			wm_set_filter(sc);
2821 			error = 0;
2822 			WM_CORE_UNLOCK(sc);
2823 			break;
2824 		}
2825 		WM_CORE_UNLOCK(sc);
2826 		/*FALLTHROUGH*/
2827 	default:
2828 #ifdef WM_MPSAFE
2829 		s = splnet();
2830 #endif
2831 		/* It may call wm_start, so unlock here */
2832 		error = ether_ioctl(ifp, cmd, data);
2833 #ifdef WM_MPSAFE
2834 		splx(s);
2835 #endif
2836 		if (error != ENETRESET)
2837 			break;
2838 
2839 		error = 0;
2840 
2841 		if (cmd == SIOCSIFCAP) {
2842 			error = (*ifp->if_init)(ifp);
2843 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2844 			;
2845 		else if (ifp->if_flags & IFF_RUNNING) {
2846 			/*
2847 			 * Multicast list has changed; set the hardware filter
2848 			 * accordingly.
2849 			 */
2850 			WM_CORE_LOCK(sc);
2851 			wm_set_filter(sc);
2852 			WM_CORE_UNLOCK(sc);
2853 		}
2854 		break;
2855 	}
2856 
2857 #ifndef WM_MPSAFE
2858 	splx(s);
2859 #endif
2860 	return error;
2861 }
2862 
2863 /* MAC address related */
2864 
2865 /*
2866  * Get the offset of the MAC address and return it.
2867  * If an error occurs, return offset 0.
2868  */
2869 static uint16_t
2870 wm_check_alt_mac_addr(struct wm_softc *sc)
2871 {
2872 	uint16_t myea[ETHER_ADDR_LEN / 2];
2873 	uint16_t offset = NVM_OFF_MACADDR;
2874 
2875 	/* Try to read alternative MAC address pointer */
2876 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2877 		return 0;
2878 
2879 	/* Check whether the pointer is valid or not. */
2880 	if ((offset == 0x0000) || (offset == 0xffff))
2881 		return 0;
2882 
2883 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2884 	/*
2885 	 * Check whether the alternative MAC address is valid or not.
2886 	 * Some cards have a non-0xffff pointer but don't actually use
2887 	 * an alternative MAC address.
2888 	 *
2889 	 * Check whether the multicast (group) bit is set or not.
2890 	 */
2891 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2892 		if (((myea[0] & 0xff) & 0x01) == 0)
2893 			return offset; /* Found */
2894 
2895 	/* Not found */
2896 	return 0;
2897 }
2898 
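/*
 * wm_read_mac_addr:
 *
 *	Read the station address from the NVM, honoring the alternative
 *	MAC address pointer where applicable and toggling the LSB on the
 *	second port of some dual port cards.
 */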
2899 static int
2900 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2901 {
2902 	uint16_t myea[ETHER_ADDR_LEN / 2];
2903 	uint16_t offset = NVM_OFF_MACADDR;
2904 	int do_invert = 0;
2905 
2906 	switch (sc->sc_type) {
2907 	case WM_T_82580:
2908 	case WM_T_I350:
2909 	case WM_T_I354:
2910 		/* EEPROM Top Level Partitioning */
2911 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2912 		break;
2913 	case WM_T_82571:
2914 	case WM_T_82575:
2915 	case WM_T_82576:
2916 	case WM_T_80003:
2917 	case WM_T_I210:
2918 	case WM_T_I211:
2919 		offset = wm_check_alt_mac_addr(sc);
2920 		if (offset == 0)
2921 			if ((sc->sc_funcid & 0x01) == 1)
2922 				do_invert = 1;
2923 		break;
2924 	default:
2925 		if ((sc->sc_funcid & 0x01) == 1)
2926 			do_invert = 1;
2927 		break;
2928 	}
2929 
2930 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2931 		myea) != 0)
2932 		goto bad;
2933 
2934 	enaddr[0] = myea[0] & 0xff;
2935 	enaddr[1] = myea[0] >> 8;
2936 	enaddr[2] = myea[1] & 0xff;
2937 	enaddr[3] = myea[1] >> 8;
2938 	enaddr[4] = myea[2] & 0xff;
2939 	enaddr[5] = myea[2] >> 8;
2940 
2941 	/*
2942 	 * Toggle the LSB of the MAC address on the second port
2943 	 * of some dual port cards.
2944 	 */
2945 	if (do_invert != 0)
2946 		enaddr[5] ^= 1;
2947 
2948 	return 0;
2949 
2950  bad:
2951 	return -1;
2952 }
2953 
2954 /*
2955  * wm_set_ral:
2956  *
2957  *	Set an entry in the receive address list.
2958  */
2959 static void
2960 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2961 {
2962 	uint32_t ral_lo, ral_hi;
2963 
2964 	if (enaddr != NULL) {
2965 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2966 		    (enaddr[3] << 24);
2967 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2968 		ral_hi |= RAL_AV;
2969 	} else {
2970 		ral_lo = 0;
2971 		ral_hi = 0;
2972 	}
2973 
2974 	if (sc->sc_type >= WM_T_82544) {
2975 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2976 		    ral_lo);
2977 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2978 		    ral_hi);
2979 	} else {
2980 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2981 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2982 	}
2983 }
2984 
2985 /*
2986  * wm_mchash:
2987  *
2988  *	Compute the hash of the multicast address for the 4096-bit
2989  *	multicast filter (1024-bit on the ICH/PCH variants).
2990  */
2991 static uint32_t
2992 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2993 {
2994 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2995 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2996 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2997 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2998 	uint32_t hash;
2999 
3000 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3001 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3002 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3003 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3004 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3005 		return (hash & 0x3ff);
3006 	}
3007 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3008 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3009 
3010 	return (hash & 0xfff);
3011 }
3012 
3013 /*
3014  * wm_set_filter:
3015  *
3016  *	Set up the receive filter.
3017  */
3018 static void
3019 wm_set_filter(struct wm_softc *sc)
3020 {
3021 	struct ethercom *ec = &sc->sc_ethercom;
3022 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3023 	struct ether_multi *enm;
3024 	struct ether_multistep step;
3025 	bus_addr_t mta_reg;
3026 	uint32_t hash, reg, bit;
3027 	int i, size;
3028 
3029 	if (sc->sc_type >= WM_T_82544)
3030 		mta_reg = WMREG_CORDOVA_MTA;
3031 	else
3032 		mta_reg = WMREG_MTA;
3033 
3034 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3035 
3036 	if (ifp->if_flags & IFF_BROADCAST)
3037 		sc->sc_rctl |= RCTL_BAM;
3038 	if (ifp->if_flags & IFF_PROMISC) {
3039 		sc->sc_rctl |= RCTL_UPE;
3040 		goto allmulti;
3041 	}
3042 
3043 	/*
3044 	 * Set the station address in the first RAL slot, and
3045 	 * clear the remaining slots.
3046 	 */
3047 	if (sc->sc_type == WM_T_ICH8)
3048 		size = WM_RAL_TABSIZE_ICH8 - 1;
3049 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3050 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
3051 	    || (sc->sc_type == WM_T_PCH_LPT))
3052 		size = WM_RAL_TABSIZE_ICH8;
3053 	else if (sc->sc_type == WM_T_82575)
3054 		size = WM_RAL_TABSIZE_82575;
3055 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3056 		size = WM_RAL_TABSIZE_82576;
3057 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3058 		size = WM_RAL_TABSIZE_I350;
3059 	else
3060 		size = WM_RAL_TABSIZE;
3061 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3062 	for (i = 1; i < size; i++)
3063 		wm_set_ral(sc, NULL, i);
3064 
3065 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3066 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3067 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3068 		size = WM_ICH8_MC_TABSIZE;
3069 	else
3070 		size = WM_MC_TABSIZE;
3071 	/* Clear out the multicast table. */
3072 	for (i = 0; i < size; i++)
3073 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3074 
3075 	ETHER_FIRST_MULTI(step, ec, enm);
3076 	while (enm != NULL) {
3077 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3078 			/*
3079 			 * We must listen to a range of multicast addresses.
3080 			 * For now, just accept all multicasts, rather than
3081 			 * trying to set only those filter bits needed to match
3082 			 * the range.  (At this time, the only use of address
3083 			 * ranges is for IP multicast routing, for which the
3084 			 * range is big enough to require all bits set.)
3085 			 */
3086 			goto allmulti;
3087 		}
3088 
3089 		hash = wm_mchash(sc, enm->enm_addrlo);
3090 
3091 		reg = (hash >> 5);
3092 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3093 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3094 		    || (sc->sc_type == WM_T_PCH2)
3095 		    || (sc->sc_type == WM_T_PCH_LPT))
3096 			reg &= 0x1f;
3097 		else
3098 			reg &= 0x7f;
3099 		bit = hash & 0x1f;
3100 
3101 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3102 		hash |= 1U << bit;
3103 
3104 		/* XXX Hardware bug?? */
3105 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3106 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3107 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3108 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3109 		} else
3110 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3111 
3112 		ETHER_NEXT_MULTI(step, enm);
3113 	}
3114 
3115 	ifp->if_flags &= ~IFF_ALLMULTI;
3116 	goto setit;
3117 
3118  allmulti:
3119 	ifp->if_flags |= IFF_ALLMULTI;
3120 	sc->sc_rctl |= RCTL_MPE;
3121 
3122  setit:
3123 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3124 }
3125 
3126 /* Reset and init related */
3127 
3128 static void
3129 wm_set_vlan(struct wm_softc *sc)
3130 {
3131 	/* Deal with VLAN enables. */
3132 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3133 		sc->sc_ctrl |= CTRL_VME;
3134 	else
3135 		sc->sc_ctrl &= ~CTRL_VME;
3136 
3137 	/* Write the control registers. */
3138 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3139 }
3140 
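/*
 * wm_set_pcie_completion_timeout:
 *
 *	Program the PCIe completion timeout to 10ms (or 16ms on
 *	capability version 2 parts) when it is still at the hardware
 *	default of 0, and disable completion timeout resend.
 */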
3141 static void
3142 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3143 {
3144 	uint32_t gcr;
3145 	pcireg_t ctrl2;
3146 
3147 	gcr = CSR_READ(sc, WMREG_GCR);
3148 
3149 	/* Only take action if timeout value is defaulted to 0 */
3150 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3151 		goto out;
3152 
3153 	if ((gcr & GCR_CAP_VER2) == 0) {
3154 		gcr |= GCR_CMPL_TMOUT_10MS;
3155 		goto out;
3156 	}
3157 
3158 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3159 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3160 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3161 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3162 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3163 
3164 out:
3165 	/* Disable completion timeout resend */
3166 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3167 
3168 	CSR_WRITE(sc, WMREG_GCR, gcr);
3169 }
3170 
3171 void
3172 wm_get_auto_rd_done(struct wm_softc *sc)
3173 {
3174 	int i;
3175 
3176 	/* wait for eeprom to reload */
3177 	switch (sc->sc_type) {
3178 	case WM_T_82571:
3179 	case WM_T_82572:
3180 	case WM_T_82573:
3181 	case WM_T_82574:
3182 	case WM_T_82583:
3183 	case WM_T_82575:
3184 	case WM_T_82576:
3185 	case WM_T_82580:
3186 	case WM_T_I350:
3187 	case WM_T_I354:
3188 	case WM_T_I210:
3189 	case WM_T_I211:
3190 	case WM_T_80003:
3191 	case WM_T_ICH8:
3192 	case WM_T_ICH9:
3193 		for (i = 0; i < 10; i++) {
3194 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3195 				break;
3196 			delay(1000);
3197 		}
3198 		if (i == 10) {
3199 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3200 			    "complete\n", device_xname(sc->sc_dev));
3201 		}
3202 		break;
3203 	default:
3204 		break;
3205 	}
3206 }
3207 
3208 void
3209 wm_lan_init_done(struct wm_softc *sc)
3210 {
3211 	uint32_t reg = 0;
3212 	int i;
3213 
3214 	/* wait for eeprom to reload */
3215 	switch (sc->sc_type) {
3216 	case WM_T_ICH10:
3217 	case WM_T_PCH:
3218 	case WM_T_PCH2:
3219 	case WM_T_PCH_LPT:
3220 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3221 			reg = CSR_READ(sc, WMREG_STATUS);
3222 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3223 				break;
3224 			delay(100);
3225 		}
3226 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3227 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3228 			    "complete\n", device_xname(sc->sc_dev), __func__);
3229 		}
3230 		break;
3231 	default:
3232 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3233 		    __func__);
3234 		break;
3235 	}
3236 
3237 	reg &= ~STATUS_LAN_INIT_DONE;
3238 	CSR_WRITE(sc, WMREG_STATUS, reg);
3239 }
3240 
3241 void
3242 wm_get_cfg_done(struct wm_softc *sc)
3243 {
3244 	int mask;
3245 	uint32_t reg;
3246 	int i;
3247 
3248 	/* wait for eeprom to reload */
3249 	switch (sc->sc_type) {
3250 	case WM_T_82542_2_0:
3251 	case WM_T_82542_2_1:
3252 		/* null */
3253 		break;
3254 	case WM_T_82543:
3255 	case WM_T_82544:
3256 	case WM_T_82540:
3257 	case WM_T_82545:
3258 	case WM_T_82545_3:
3259 	case WM_T_82546:
3260 	case WM_T_82546_3:
3261 	case WM_T_82541:
3262 	case WM_T_82541_2:
3263 	case WM_T_82547:
3264 	case WM_T_82547_2:
3265 	case WM_T_82573:
3266 	case WM_T_82574:
3267 	case WM_T_82583:
3268 		/* generic */
3269 		delay(10*1000);
3270 		break;
3271 	case WM_T_80003:
3272 	case WM_T_82571:
3273 	case WM_T_82572:
3274 	case WM_T_82575:
3275 	case WM_T_82576:
3276 	case WM_T_82580:
3277 	case WM_T_I350:
3278 	case WM_T_I354:
3279 	case WM_T_I210:
3280 	case WM_T_I211:
3281 		if (sc->sc_type == WM_T_82571) {
3282 			/* Only 82571 shares port 0 */
3283 			mask = EEMNGCTL_CFGDONE_0;
3284 		} else
3285 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
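		/* e.g. function 1 polls (EEMNGCTL_CFGDONE_0 << 1). */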
3286 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3287 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3288 				break;
3289 			delay(1000);
3290 		}
3291 		if (i >= WM_PHY_CFG_TIMEOUT) {
3292 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3293 				device_xname(sc->sc_dev), __func__));
3294 		}
3295 		break;
3296 	case WM_T_ICH8:
3297 	case WM_T_ICH9:
3298 	case WM_T_ICH10:
3299 	case WM_T_PCH:
3300 	case WM_T_PCH2:
3301 	case WM_T_PCH_LPT:
3302 		delay(10*1000);
3303 		if (sc->sc_type >= WM_T_ICH10)
3304 			wm_lan_init_done(sc);
3305 		else
3306 			wm_get_auto_rd_done(sc);
3307 
3308 		reg = CSR_READ(sc, WMREG_STATUS);
3309 		if ((reg & STATUS_PHYRA) != 0)
3310 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3311 		break;
3312 	default:
3313 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3314 		    __func__);
3315 		break;
3316 	}
3317 }
3318 
3319 /* Init hardware bits */
3320 void
3321 wm_initialize_hardware_bits(struct wm_softc *sc)
3322 {
3323 	uint32_t tarc0, tarc1, reg;
3324 
3325 	/* For 82571 variant, 80003 and ICHs */
3326 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3327 	    || (sc->sc_type >= WM_T_80003)) {
3328 
3329 		/* Transmit Descriptor Control 0 */
3330 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3331 		reg |= TXDCTL_COUNT_DESC;
3332 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3333 
3334 		/* Transmit Descriptor Control 1 */
3335 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3336 		reg |= TXDCTL_COUNT_DESC;
3337 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3338 
3339 		/* TARC0 */
3340 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3341 		switch (sc->sc_type) {
3342 		case WM_T_82571:
3343 		case WM_T_82572:
3344 		case WM_T_82573:
3345 		case WM_T_82574:
3346 		case WM_T_82583:
3347 		case WM_T_80003:
3348 			/* Clear bits 30..27 */
3349 			tarc0 &= ~__BITS(30, 27);
3350 			break;
3351 		default:
3352 			break;
3353 		}
3354 
3355 		switch (sc->sc_type) {
3356 		case WM_T_82571:
3357 		case WM_T_82572:
3358 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3359 
3360 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3361 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3362 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3363 			/* 8257[12] Errata No.7 */
3364 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3365 
3366 			/* TARC1 bit 28 */
3367 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3368 				tarc1 &= ~__BIT(28);
3369 			else
3370 				tarc1 |= __BIT(28);
3371 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3372 
3373 			/*
3374 			 * 8257[12] Errata No.13
3375 			 * Disable Dynamic Clock Gating.
3376 			 */
3377 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3378 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3379 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3380 			break;
3381 		case WM_T_82573:
3382 		case WM_T_82574:
3383 		case WM_T_82583:
3384 			if ((sc->sc_type == WM_T_82574)
3385 			    || (sc->sc_type == WM_T_82583))
3386 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3387 
3388 			/* Extended Device Control */
3389 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3390 			reg &= ~__BIT(23);	/* Clear bit 23 */
3391 			reg |= __BIT(22);	/* Set bit 22 */
3392 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3393 
3394 			/* Device Control */
3395 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3396 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3397 
3398 			/* PCIe Control Register */
3399 			/*
3400 			 * 82573 Errata (unknown).
3401 			 *
3402 			 * 82574 Errata 25 and 82583 Errata 12
3403 			 * "Dropped Rx Packets":
3404 			 *   NVM image 2.1.4 and newer doesn't have this bug.
3405 			 */
3406 			reg = CSR_READ(sc, WMREG_GCR);
3407 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3408 			CSR_WRITE(sc, WMREG_GCR, reg);
3409 
3410 			if ((sc->sc_type == WM_T_82574)
3411 			    || (sc->sc_type == WM_T_82583)) {
3412 				/*
3413 				 * Document says this bit must be set for
3414 				 * proper operation.
3415 				 */
3416 				reg = CSR_READ(sc, WMREG_GCR);
3417 				reg |= __BIT(22);
3418 				CSR_WRITE(sc, WMREG_GCR, reg);
3419 
3420 				/*
3421 				 * Apply a workaround for the hardware erratum
3422 				 * documented in the errata docs. It fixes an
3423 				 * issue where unreliable PCIe completions can
3424 				 * occur, particularly with ASPM enabled.
3425 				 * Without the fix, the issue can cause Tx
3426 				 * timeouts.
3427 				 */
3428 				reg = CSR_READ(sc, WMREG_GCR2);
3429 				reg |= __BIT(0);
3430 				CSR_WRITE(sc, WMREG_GCR2, reg);
3431 			}
3432 			break;
3433 		case WM_T_80003:
3434 			/* TARC0 */
3435 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3436 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3437 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3438 
3439 			/* TARC1 bit 28 */
3440 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3441 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3442 				tarc1 &= ~__BIT(28);
3443 			else
3444 				tarc1 |= __BIT(28);
3445 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3446 			break;
3447 		case WM_T_ICH8:
3448 		case WM_T_ICH9:
3449 		case WM_T_ICH10:
3450 		case WM_T_PCH:
3451 		case WM_T_PCH2:
3452 		case WM_T_PCH_LPT:
3453 			/* TARC0 */
3454 			if (sc->sc_type == WM_T_ICH8) {
3455 				/* Set TARC0 bits 29 and 28 */
3456 				tarc0 |= __BITS(29, 28);
3457 			}
3458 			/* Set TARC0 bits 23,24,26,27 */
3459 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3460 
3461 			/* CTRL_EXT */
3462 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3463 			reg |= __BIT(22);	/* Set bit 22 */
3464 			/*
3465 			 * Enable PHY low-power state when MAC is at D3
3466 			 * w/o WoL
3467 			 */
3468 			if (sc->sc_type >= WM_T_PCH)
3469 				reg |= CTRL_EXT_PHYPDEN;
3470 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3471 
3472 			/* TARC1 */
3473 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3474 			/* bit 28 */
3475 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3476 				tarc1 &= ~__BIT(28);
3477 			else
3478 				tarc1 |= __BIT(28);
3479 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3480 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3481 
3482 			/* Device Status */
3483 			if (sc->sc_type == WM_T_ICH8) {
3484 				reg = CSR_READ(sc, WMREG_STATUS);
3485 				reg &= ~__BIT(31);
3486 				CSR_WRITE(sc, WMREG_STATUS, reg);
3488 			}
3489 
3490 			/*
3491 			 * To work around a descriptor data corruption issue
3492 			 * seen during NFS v2 UDP traffic, just disable the
3493 			 * NFS filtering capability.
3494 			 */
3495 			reg = CSR_READ(sc, WMREG_RFCTL);
3496 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3497 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3498 			break;
3499 		default:
3500 			break;
3501 		}
3502 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3503 
3504 		/*
3505 		 * 8257[12] Errata No.52 and some others.
3506 		 * Avoid RSS Hash Value bug.
3507 		 */
3508 		switch (sc->sc_type) {
3509 		case WM_T_82571:
3510 		case WM_T_82572:
3511 		case WM_T_82573:
3512 		case WM_T_80003:
3513 		case WM_T_ICH8:
3514 			reg = CSR_READ(sc, WMREG_RFCTL);
3515 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3516 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3517 			break;
3518 		default:
3519 			break;
3520 		}
3521 	}
3522 }
3523 
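/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate a raw RXPBS value into a packet buffer size by table
 *	lookup; out-of-range values yield 0.
 */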
3524 static uint32_t
3525 wm_rxpbs_adjust_82580(uint32_t val)
3526 {
3527 	uint32_t rv = 0;
3528 
3529 	if (val < __arraycount(wm_82580_rxpbs_table))
3530 		rv = wm_82580_rxpbs_table[val];
3531 
3532 	return rv;
3533 }
3534 
3535 /*
3536  * wm_reset:
3537  *
3538  *	Reset the chip to a known state.
3539  */
3540 static void
3541 wm_reset(struct wm_softc *sc)
3542 {
3543 	int phy_reset = 0;
3544 	int i, error = 0;
3545 	uint32_t reg, mask;
3546 
3547 	/*
3548 	 * Allocate on-chip memory according to the MTU size.
3549 	 * The Packet Buffer Allocation register must be written
3550 	 * before the chip is reset.
3551 	 */
3552 	switch (sc->sc_type) {
3553 	case WM_T_82547:
3554 	case WM_T_82547_2:
3555 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3556 		    PBA_22K : PBA_30K;
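		/*
		 * A sketch of the split below (assuming the PBA_* values
		 * are in kilobytes): with sc_pba == PBA_30K, the receive
		 * side keeps 30K of packet buffer and the Tx FIFO gets
		 * the remaining (40 - 30)K, starting at
		 * sc_pba << PBA_ADDR_SHIFT.
		 */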
3557 		for (i = 0; i < sc->sc_ntxqueues; i++) {
3558 			struct wm_txqueue *txq = &sc->sc_txq[i];
3559 			txq->txq_fifo_head = 0;
3560 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3561 			txq->txq_fifo_size =
3562 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3563 			txq->txq_fifo_stall = 0;
3564 		}
3565 		break;
3566 	case WM_T_82571:
3567 	case WM_T_82572:
3568 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3569 	case WM_T_80003:
3570 		sc->sc_pba = PBA_32K;
3571 		break;
3572 	case WM_T_82573:
3573 		sc->sc_pba = PBA_12K;
3574 		break;
3575 	case WM_T_82574:
3576 	case WM_T_82583:
3577 		sc->sc_pba = PBA_20K;
3578 		break;
3579 	case WM_T_82576:
3580 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3581 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3582 		break;
3583 	case WM_T_82580:
3584 	case WM_T_I350:
3585 	case WM_T_I354:
3586 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3587 		break;
3588 	case WM_T_I210:
3589 	case WM_T_I211:
3590 		sc->sc_pba = PBA_34K;
3591 		break;
3592 	case WM_T_ICH8:
3593 		/* Workaround for a bit corruption issue in FIFO memory */
3594 		sc->sc_pba = PBA_8K;
3595 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3596 		break;
3597 	case WM_T_ICH9:
3598 	case WM_T_ICH10:
3599 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3600 		    PBA_14K : PBA_10K;
3601 		break;
3602 	case WM_T_PCH:
3603 	case WM_T_PCH2:
3604 	case WM_T_PCH_LPT:
3605 		sc->sc_pba = PBA_26K;
3606 		break;
3607 	default:
3608 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3609 		    PBA_40K : PBA_48K;
3610 		break;
3611 	}
3612 	/*
3613 	 * Only old or non-multiqueue devices have the PBA register.
3614 	 * XXX Need special handling for 82575.
3615 	 */
3616 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3617 	    || (sc->sc_type == WM_T_82575))
3618 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3619 
3620 	/* Prevent the PCI-E bus from sticking */
3621 	if (sc->sc_flags & WM_F_PCIE) {
3622 		int timeout = 800;
3623 
3624 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3625 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3626 
3627 		while (timeout--) {
3628 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3629 			    == 0)
3630 				break;
3631 			delay(100);
3632 		}
3633 	}
3634 
3635 	/* Set the completion timeout for interface */
3636 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3637 	    || (sc->sc_type == WM_T_82580)
3638 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3639 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3640 		wm_set_pcie_completion_timeout(sc);
3641 
3642 	/* Clear interrupt */
3643 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3644 	if (sc->sc_nintrs > 1) {
3645 		if (sc->sc_type != WM_T_82574) {
3646 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3647 			CSR_WRITE(sc, WMREG_EIAC, 0);
3648 		} else {
3649 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3650 		}
3651 	}
3652 
3653 	/* Stop the transmit and receive processes. */
3654 	CSR_WRITE(sc, WMREG_RCTL, 0);
3655 	sc->sc_rctl &= ~RCTL_EN;
3656 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3657 	CSR_WRITE_FLUSH(sc);
3658 
3659 	/* XXX set_tbi_sbp_82543() */
3660 
3661 	delay(10*1000);
3662 
3663 	/* Must acquire the MDIO ownership before MAC reset */
3664 	switch (sc->sc_type) {
3665 	case WM_T_82573:
3666 	case WM_T_82574:
3667 	case WM_T_82583:
3668 		error = wm_get_hw_semaphore_82573(sc);
3669 		break;
3670 	default:
3671 		break;
3672 	}
3673 
3674 	/*
3675 	 * 82541 Errata 29? & 82547 Errata 28?
3676 	 * See also the description about PHY_RST bit in CTRL register
3677 	 * in 8254x_GBe_SDM.pdf.
3678 	 */
3679 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3680 		CSR_WRITE(sc, WMREG_CTRL,
3681 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3682 		CSR_WRITE_FLUSH(sc);
3683 		delay(5000);
3684 	}
3685 
3686 	switch (sc->sc_type) {
3687 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3688 	case WM_T_82541:
3689 	case WM_T_82541_2:
3690 	case WM_T_82547:
3691 	case WM_T_82547_2:
3692 		/*
3693 		 * On some chipsets, a reset through a memory-mapped write
3694 		 * cycle can cause the chip to reset before completing the
3695 		 * write cycle.  This causes a major headache that can be
3696 		 * avoided by issuing the reset via indirect register writes
3697 		 * through I/O space.
3698 		 *
3699 		 * So, if we successfully mapped the I/O BAR at attach time,
3700 		 * use that.  Otherwise, try our luck with a memory-mapped
3701 		 * reset.
3702 		 */
3703 		if (sc->sc_flags & WM_F_IOH_VALID)
3704 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3705 		else
3706 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3707 		break;
3708 	case WM_T_82545_3:
3709 	case WM_T_82546_3:
3710 		/* Use the shadow control register on these chips. */
3711 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3712 		break;
3713 	case WM_T_80003:
3714 		mask = swfwphysem[sc->sc_funcid];
3715 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3716 		wm_get_swfw_semaphore(sc, mask);
3717 		CSR_WRITE(sc, WMREG_CTRL, reg);
3718 		wm_put_swfw_semaphore(sc, mask);
3719 		break;
3720 	case WM_T_ICH8:
3721 	case WM_T_ICH9:
3722 	case WM_T_ICH10:
3723 	case WM_T_PCH:
3724 	case WM_T_PCH2:
3725 	case WM_T_PCH_LPT:
3726 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3727 		if (wm_check_reset_block(sc) == 0) {
3728 			/*
3729 			 * Gate automatic PHY configuration by hardware on
3730 			 * non-managed 82579
3731 			 */
3732 			if ((sc->sc_type == WM_T_PCH2)
3733 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3734 				!= 0))
3735 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3736 
3738 			reg |= CTRL_PHY_RESET;
3739 			phy_reset = 1;
3740 		}
3741 		wm_get_swfwhw_semaphore(sc);
3742 		CSR_WRITE(sc, WMREG_CTRL, reg);
3743 		/* Don't insert a completion barrier during reset */
3744 		delay(20*1000);
3745 		wm_put_swfwhw_semaphore(sc);
3746 		break;
3747 	case WM_T_82580:
3748 	case WM_T_I350:
3749 	case WM_T_I354:
3750 	case WM_T_I210:
3751 	case WM_T_I211:
3752 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3753 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3754 			CSR_WRITE_FLUSH(sc);
3755 		delay(5000);
3756 		break;
3757 	case WM_T_82542_2_0:
3758 	case WM_T_82542_2_1:
3759 	case WM_T_82543:
3760 	case WM_T_82540:
3761 	case WM_T_82545:
3762 	case WM_T_82546:
3763 	case WM_T_82571:
3764 	case WM_T_82572:
3765 	case WM_T_82573:
3766 	case WM_T_82574:
3767 	case WM_T_82575:
3768 	case WM_T_82576:
3769 	case WM_T_82583:
3770 	default:
3771 		/* Everything else can safely use the documented method. */
3772 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3773 		break;
3774 	}
3775 
3776 	/* Must release the MDIO ownership after MAC reset */
3777 	switch (sc->sc_type) {
3778 	case WM_T_82573:
3779 	case WM_T_82574:
3780 	case WM_T_82583:
3781 		if (error == 0)
3782 			wm_put_hw_semaphore_82573(sc);
3783 		break;
3784 	default:
3785 		break;
3786 	}
3787 
3788 	if (phy_reset != 0)
3789 		wm_get_cfg_done(sc);
3790 
3791 	/* reload EEPROM */
3792 	switch (sc->sc_type) {
3793 	case WM_T_82542_2_0:
3794 	case WM_T_82542_2_1:
3795 	case WM_T_82543:
3796 	case WM_T_82544:
3797 		delay(10);
3798 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3799 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3800 		CSR_WRITE_FLUSH(sc);
3801 		delay(2000);
3802 		break;
3803 	case WM_T_82540:
3804 	case WM_T_82545:
3805 	case WM_T_82545_3:
3806 	case WM_T_82546:
3807 	case WM_T_82546_3:
3808 		delay(5*1000);
3809 		/* XXX Disable HW ARPs on ASF enabled adapters */
3810 		break;
3811 	case WM_T_82541:
3812 	case WM_T_82541_2:
3813 	case WM_T_82547:
3814 	case WM_T_82547_2:
3815 		delay(20000);
3816 		/* XXX Disable HW ARPs on ASF enabled adapters */
3817 		break;
3818 	case WM_T_82571:
3819 	case WM_T_82572:
3820 	case WM_T_82573:
3821 	case WM_T_82574:
3822 	case WM_T_82583:
3823 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3824 			delay(10);
3825 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3826 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3827 			CSR_WRITE_FLUSH(sc);
3828 		}
3829 		/* check EECD_EE_AUTORD */
3830 		wm_get_auto_rd_done(sc);
3831 		/*
3832 		 * PHY configuration from the NVM starts just after
3833 		 * EECD_AUTO_RD is set.
3834 		 */
3835 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3836 		    || (sc->sc_type == WM_T_82583))
3837 			delay(25*1000);
3838 		break;
3839 	case WM_T_82575:
3840 	case WM_T_82576:
3841 	case WM_T_82580:
3842 	case WM_T_I350:
3843 	case WM_T_I354:
3844 	case WM_T_I210:
3845 	case WM_T_I211:
3846 	case WM_T_80003:
3847 		/* check EECD_EE_AUTORD */
3848 		wm_get_auto_rd_done(sc);
3849 		break;
3850 	case WM_T_ICH8:
3851 	case WM_T_ICH9:
3852 	case WM_T_ICH10:
3853 	case WM_T_PCH:
3854 	case WM_T_PCH2:
3855 	case WM_T_PCH_LPT:
3856 		break;
3857 	default:
3858 		panic("%s: unknown type\n", __func__);
3859 	}
3860 
3861 	/* Check whether EEPROM is present or not */
3862 	switch (sc->sc_type) {
3863 	case WM_T_82575:
3864 	case WM_T_82576:
3865 	case WM_T_82580:
3866 	case WM_T_I350:
3867 	case WM_T_I354:
3868 	case WM_T_ICH8:
3869 	case WM_T_ICH9:
3870 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3871 			/* Not found */
3872 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3873 			if (sc->sc_type == WM_T_82575)
3874 				wm_reset_init_script_82575(sc);
3875 		}
3876 		break;
3877 	default:
3878 		break;
3879 	}
3880 
3881 	if ((sc->sc_type == WM_T_82580)
3882 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3883 		/* clear global device reset status bit */
3884 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3885 	}
3886 
3887 	/* Clear any pending interrupt events. */
3888 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3889 	reg = CSR_READ(sc, WMREG_ICR);
3890 	if (sc->sc_nintrs > 1) {
3891 		if (sc->sc_type != WM_T_82574) {
3892 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3893 			CSR_WRITE(sc, WMREG_EIAC, 0);
3894 		} else
3895 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3896 	}
3897 
3898 	/* reload sc_ctrl */
3899 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3900 
3901 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3902 		wm_set_eee_i350(sc);
3903 
3904 	/* dummy read from WUC */
3905 	if (sc->sc_type == WM_T_PCH)
3906 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3907 	/*
3908 	 * For PCH, this write will make sure that any noise will be detected
3909 	 * as a CRC error and be dropped rather than show up as a bad packet
3910 	 * to the DMA engine.
3911 	 */
3912 	if (sc->sc_type == WM_T_PCH)
3913 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3914 
3915 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3916 		CSR_WRITE(sc, WMREG_WUC, 0);
3917 
3918 	wm_reset_mdicnfg_82580(sc);
3919 
3920 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3921 		wm_pll_workaround_i210(sc);
3922 }
3923 
3924 /*
3925  * wm_add_rxbuf:
3926  *
3927  *	Add a receive buffer to the indicated descriptor.
3928  */
3929 static int
3930 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3931 {
3932 	struct wm_softc *sc = rxq->rxq_sc;
3933 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3934 	struct mbuf *m;
3935 	int error;
3936 
3937 	KASSERT(WM_RX_LOCKED(rxq));
3938 
3939 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3940 	if (m == NULL)
3941 		return ENOBUFS;
3942 
3943 	MCLGET(m, M_DONTWAIT);
3944 	if ((m->m_flags & M_EXT) == 0) {
3945 		m_freem(m);
3946 		return ENOBUFS;
3947 	}
3948 
3949 	if (rxs->rxs_mbuf != NULL)
3950 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3951 
3952 	rxs->rxs_mbuf = m;
3953 
3954 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3955 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3956 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
3957 	if (error) {
3958 		/* XXX XXX XXX */
3959 		aprint_error_dev(sc->sc_dev,
3960 		    "unable to load rx DMA map %d, error = %d\n",
3961 		    idx, error);
3962 		panic("wm_add_rxbuf");
3963 	}
3964 
3965 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3966 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3967 
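	/*
	 * On NEWQUEUE (82575 and later) devices, only touch the Rx
	 * descriptor here while the receiver is enabled, matching the
	 * "On 575 and later set RDT only if RX enabled" rule applied in
	 * wm_init_locked(); otherwise it is initialized there.
	 */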
3968 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3969 		if ((sc->sc_rctl & RCTL_EN) != 0)
3970 			wm_init_rxdesc(rxq, idx);
3971 	} else
3972 		wm_init_rxdesc(rxq, idx);
3973 
3974 	return 0;
3975 }
3976 
3977 /*
3978  * wm_rxdrain:
3979  *
3980  *	Drain the receive queue.
3981  */
3982 static void
3983 wm_rxdrain(struct wm_rxqueue *rxq)
3984 {
3985 	struct wm_softc *sc = rxq->rxq_sc;
3986 	struct wm_rxsoft *rxs;
3987 	int i;
3988 
3989 	KASSERT(WM_RX_LOCKED(rxq));
3990 
3991 	for (i = 0; i < WM_NRXDESC; i++) {
3992 		rxs = &rxq->rxq_soft[i];
3993 		if (rxs->rxs_mbuf != NULL) {
3994 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3995 			m_freem(rxs->rxs_mbuf);
3996 			rxs->rxs_mbuf = NULL;
3997 		}
3998 	}
3999 }
4000 
4001 
4002 /*
4003  * XXX copied from FreeBSD's sys/net/rss_config.c
4004  */
4005 /*
4006  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4007  * effectiveness may be limited by algorithm choice and available entropy
4008  * during the boot.
4009  *
4010  * XXXRW: And that we don't randomize it yet!
4011  *
4012  * This is the default Microsoft RSS specification key which is also
4013  * the Chelsio T5 firmware default key.
4014  */
4015 #define RSS_KEYSIZE 40
4016 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4017 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4018 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4019 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4020 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4021 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4022 };
4023 
4024 /*
4025  * Caller must pass an array of size sizeof(wm_rss_key).
4026  *
4027  * XXX
4028  * As if_ixgbe may use this function, this function should not be
4029  * an if_wm-specific function.
4030  */
4031 static void
4032 wm_rss_getkey(uint8_t *key)
4033 {
4034 
4035 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4036 }
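/*
 * Note: wm_init_rss() below hands the 40-byte key to the hardware as
 * RSSRK_NUM_REGS 32-bit RSSRK register writes (40 / 4 == 10 words);
 * the CTASSERT there keeps the two sizes in sync.
 */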
4037 
4038 /*
4039  * Setup registers for RSS.
4040  *
4041  * XXX VMDq is not yet supported.
4042  */
4043 static void
4044 wm_init_rss(struct wm_softc *sc)
4045 {
4046 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4047 	int i;
4048 
4049 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4050 
4051 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4052 		int qid, reta_ent;
4053 
4054 		qid  = i % sc->sc_nrxqueues;
4055 		switch (sc->sc_type) {
4056 		case WM_T_82574:
4057 			reta_ent = __SHIFTIN(qid,
4058 			    RETA_ENT_QINDEX_MASK_82574);
4059 			break;
4060 		case WM_T_82575:
4061 			reta_ent = __SHIFTIN(qid,
4062 			    RETA_ENT_QINDEX1_MASK_82575);
4063 			break;
4064 		default:
4065 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4066 			break;
4067 		}
4068 
4069 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4070 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4071 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4072 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4073 	}
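	/*
	 * For illustration: with sc_nrxqueues == 2 the loop above fills
	 * the redirection table with the pattern 0, 1, 0, 1, ..., so the
	 * low-order bits of a packet's RSS hash select its Rx queue.
	 */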
4074 
4075 	wm_rss_getkey((uint8_t *)rss_key);
4076 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4077 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4078 
4079 	if (sc->sc_type == WM_T_82574)
4080 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4081 	else
4082 		mrqc = MRQC_ENABLE_RSS_MQ;
4083 
4084 	/*
4085 	 * XXX The same as FreeBSD's igb.
4086 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4087 	 */
4088 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4089 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4090 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4091 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4092 
4093 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4094 }
4095 
4096 /*
4097  * Adjust the TX and RX queue numbers which the system actually uses.
4098  *
4099  * The numbers are affected by the parameters below.
4100  *     - The number of hardware queues
4101  *     - The number of MSI-X vectors (= "nvectors" argument)
4102  *     - ncpu
4103  */
4104 static void
4105 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4106 {
4107 	int hw_ntxqueues, hw_nrxqueues;
4108 
4109 	if (nvectors < 3) {
4110 		sc->sc_ntxqueues = 1;
4111 		sc->sc_nrxqueues = 1;
4112 		return;
4113 	}
4114 
4115 	switch (sc->sc_type) {
4116 	case WM_T_82572:
4117 		hw_ntxqueues = 2;
4118 		hw_nrxqueues = 2;
4119 		break;
4120 	case WM_T_82574:
4121 		hw_ntxqueues = 2;
4122 		hw_nrxqueues = 2;
4123 		break;
4124 	case WM_T_82575:
4125 		hw_ntxqueues = 4;
4126 		hw_nrxqueues = 4;
4127 		break;
4128 	case WM_T_82576:
4129 		hw_ntxqueues = 16;
4130 		hw_nrxqueues = 16;
4131 		break;
4132 	case WM_T_82580:
4133 	case WM_T_I350:
4134 	case WM_T_I354:
4135 		hw_ntxqueues = 8;
4136 		hw_nrxqueues = 8;
4137 		break;
4138 	case WM_T_I210:
4139 		hw_ntxqueues = 4;
4140 		hw_nrxqueues = 4;
4141 		break;
4142 	case WM_T_I211:
4143 		hw_ntxqueues = 2;
4144 		hw_nrxqueues = 2;
4145 		break;
4146 		/*
4147 		 * As the ethernet controllers below do not support MSI-X,
4148 		 * this driver does not use multiqueue on them.
4149 		 *     - WM_T_80003
4150 		 *     - WM_T_ICH8
4151 		 *     - WM_T_ICH9
4152 		 *     - WM_T_ICH10
4153 		 *     - WM_T_PCH
4154 		 *     - WM_T_PCH2
4155 		 *     - WM_T_PCH_LPT
4156 		 */
4157 	default:
4158 		hw_ntxqueues = 1;
4159 		hw_nrxqueues = 1;
4160 		break;
4161 	}
4162 
4163 	/*
4164 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
4165 	 * the number of queues actually used.
4166 	 *
4167 	 * XXX
4168 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4169 	 * However, the number of MSI-X vectors on recent controllers (such as
4170 	 * the I354) expects drivers to bundle a TX queue interrupt and an RX
4171 	 * queue interrupt into one vector, e.g. FreeBSD's igb handles
4172 	 * interrupts in such a way.
4173 	 */
4174 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4175 		sc->sc_ntxqueues = (nvectors - 1) / 2;
4176 		sc->sc_nrxqueues = (nvectors - 1) / 2;
4177 	} else {
4178 		sc->sc_ntxqueues = hw_ntxqueues;
4179 		sc->sc_nrxqueues = hw_nrxqueues;
4180 	}
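	/*
	 * Worked example: five MSI-X vectors on an 82576 (16 hardware
	 * queues) take the first branch above, giving
	 * sc_ntxqueues = sc_nrxqueues = (5 - 1) / 2 = 2, before the
	 * ncpu clamp and the single-TX-queue override below.
	 */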
4181 
4182 	/*
4183 	 * As more queues than CPUs cannot improve scaling, we limit
4184 	 * the number of queues actually used.
4185 	 */
4186 	if (ncpu < sc->sc_ntxqueues)
4187 		sc->sc_ntxqueues = ncpu;
4188 	if (ncpu < sc->sc_nrxqueues)
4189 		sc->sc_nrxqueues = ncpu;
4190 
4191 	/* XXX Currently, this driver supports RX multiqueue only. */
4192 	sc->sc_ntxqueues = 1;
4193 }
4194 
4195 /*
4196  * Both single interrupt MSI and INTx can use this function.
4197  */
4198 static int
4199 wm_setup_legacy(struct wm_softc *sc)
4200 {
4201 	pci_chipset_tag_t pc = sc->sc_pc;
4202 	const char *intrstr = NULL;
4203 	char intrbuf[PCI_INTRSTR_LEN];
4204 
4205 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4206 	    sizeof(intrbuf));
4207 #ifdef WM_MPSAFE
4208 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4209 #endif
4210 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4211 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4212 	if (sc->sc_ihs[0] == NULL) {
4213 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
4214 		    (pci_intr_type(sc->sc_intrs[0])
4215 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4216 		return ENOMEM;
4217 	}
4218 
4219 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4220 	sc->sc_nintrs = 1;
4221 	return 0;
4222 }
4223 
4224 static int
4225 wm_setup_msix(struct wm_softc *sc)
4226 {
4227 	void *vih;
4228 	kcpuset_t *affinity;
4229 	int qidx, error, intr_idx, tx_established, rx_established;
4230 	pci_chipset_tag_t pc = sc->sc_pc;
4231 	const char *intrstr = NULL;
4232 	char intrbuf[PCI_INTRSTR_LEN];
4233 	char intr_xname[INTRDEVNAMEBUF];
4234 
4235 	kcpuset_create(&affinity, false);
4236 	intr_idx = 0;
4237 
4238 	/*
4239 	 * TX
4240 	 */
4241 	tx_established = 0;
4242 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4243 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
4244 
4245 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4246 		    sizeof(intrbuf));
4247 #ifdef WM_MPSAFE
4248 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4249 		    PCI_INTR_MPSAFE, true);
4250 #endif
4251 		memset(intr_xname, 0, sizeof(intr_xname));
4252 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4253 		    device_xname(sc->sc_dev), qidx);
4254 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4255 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
4256 		if (vih == NULL) {
4257 			aprint_error_dev(sc->sc_dev,
4258 			    "unable to establish MSI-X(for TX)%s%s\n",
4259 			    intrstr ? " at " : "",
4260 			    intrstr ? intrstr : "");
4261 
4262 			goto fail_0;
4263 		}
4264 		kcpuset_zero(affinity);
4265 		/* Round-robin affinity */
4266 		kcpuset_set(affinity, intr_idx % ncpu);
4267 		error = interrupt_distribute(vih, affinity, NULL);
4268 		if (error == 0) {
4269 			aprint_normal_dev(sc->sc_dev,
4270 			    "for TX interrupting at %s affinity to %u\n",
4271 			    intrstr, intr_idx % ncpu);
4272 		} else {
4273 			aprint_normal_dev(sc->sc_dev,
4274 			    "for TX interrupting at %s\n", intrstr);
4275 		}
4276 		sc->sc_ihs[intr_idx] = vih;
4277 		txq->txq_id = qidx;
4278 		txq->txq_intr_idx = intr_idx;
4279 
4280 		tx_established++;
4281 		intr_idx++;
4282 	}
4283 
4284 	/*
4285 	 * RX
4286 	 */
4287 	rx_established = 0;
4288 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4289 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4290 
4291 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4292 		    sizeof(intrbuf));
4293 #ifdef WM_MPSAFE
4294 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4295 		    PCI_INTR_MPSAFE, true);
4296 #endif
4297 		memset(intr_xname, 0, sizeof(intr_xname));
4298 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4299 		    device_xname(sc->sc_dev), qidx);
4300 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4301 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4302 		if (vih == NULL) {
4303 			aprint_error_dev(sc->sc_dev,
4304 			    "unable to establish MSI-X(for RX)%s%s\n",
4305 			    intrstr ? " at " : "",
4306 			    intrstr ? intrstr : "");
4307 
4308 			goto fail_1;
4309 		}
4310 		kcpuset_zero(affinity);
4311 		/* Round-robin affinity */
4312 		kcpuset_set(affinity, intr_idx % ncpu);
4313 		error = interrupt_distribute(vih, affinity, NULL);
4314 		if (error == 0) {
4315 			aprint_normal_dev(sc->sc_dev,
4316 			    "for RX interrupting at %s affinity to %u\n",
4317 			    intrstr, intr_idx % ncpu);
4318 		} else {
4319 			aprint_normal_dev(sc->sc_dev,
4320 			    "for RX interrupting at %s\n", intrstr);
4321 		}
4322 		sc->sc_ihs[intr_idx] = vih;
4323 		rxq->rxq_id = qidx;
4324 		rxq->rxq_intr_idx = intr_idx;
4325 
4326 		rx_established++;
4327 		intr_idx++;
4328 	}
4329 
4330 	/*
4331 	 * LINK
4332 	 */
4333 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4334 	    sizeof(intrbuf));
4335 #ifdef WM_MPSAFE
4336 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4337 	    PCI_INTR_MPSAFE, true);
4338 #endif
4339 	memset(intr_xname, 0, sizeof(intr_xname));
4340 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4341 	    device_xname(sc->sc_dev));
4342 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4343 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4344 	if (vih == NULL) {
4345 		aprint_error_dev(sc->sc_dev,
4346 		    "unable to establish MSI-X(for LINK)%s%s\n",
4347 		    intrstr ? " at " : "",
4348 		    intrstr ? intrstr : "");
4349 
4350 		goto fail_1;
4351 	}
4352 	/* keep default affinity to LINK interrupt */
4353 	aprint_normal_dev(sc->sc_dev,
4354 	    "for LINK interrupting at %s\n", intrstr);
4355 	sc->sc_ihs[intr_idx] = vih;
4356 	sc->sc_link_intr_idx = intr_idx;
4357 
4358 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
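	/*
	 * The vectors are laid out as TX0..TXn, RX0..RXn, LINK; e.g. two
	 * TX and two RX queues use five vectors in total.
	 */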
4359 	kcpuset_destroy(affinity);
4360 	return 0;
4361 
4362  fail_1:
4363 	for (qidx = 0; qidx < rx_established; qidx++) {
4364 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4365 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
4366 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4367 	}
4368  fail_0:
4369 	for (qidx = 0; qidx < tx_established; qidx++) {
4370 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
4371 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
4372 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
4373 	}
4374 
4375 	kcpuset_destroy(affinity);
4376 	return ENOMEM;
4377 }
4378 
4379 /*
4380  * wm_init:		[ifnet interface function]
4381  *
4382  *	Initialize the interface.
4383  */
4384 static int
4385 wm_init(struct ifnet *ifp)
4386 {
4387 	struct wm_softc *sc = ifp->if_softc;
4388 	int ret;
4389 
4390 	WM_CORE_LOCK(sc);
4391 	ret = wm_init_locked(ifp);
4392 	WM_CORE_UNLOCK(sc);
4393 
4394 	return ret;
4395 }
4396 
4397 static int
4398 wm_init_locked(struct ifnet *ifp)
4399 {
4400 	struct wm_softc *sc = ifp->if_softc;
4401 	int i, j, trynum, error = 0;
4402 	uint32_t reg;
4403 
4404 	KASSERT(WM_CORE_LOCKED(sc));
4405 	/*
4406 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4407 	 * There is a small but measurable benefit to avoiding the adjustment
4408 	 * of the descriptor so that the headers are aligned, for normal mtu,
4409 	 * on such platforms.  One possibility is that the DMA itself is
4410 	 * slightly more efficient if the front of the entire packet (instead
4411 	 * of the front of the headers) is aligned.
4412 	 *
4413 	 * Note we must always set align_tweak to 0 if we are using
4414 	 * jumbo frames.
4415 	 */
4416 #ifdef __NO_STRICT_ALIGNMENT
4417 	sc->sc_align_tweak = 0;
4418 #else
4419 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4420 		sc->sc_align_tweak = 0;
4421 	else
4422 		sc->sc_align_tweak = 2;
4423 #endif /* __NO_STRICT_ALIGNMENT */
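	/*
	 * The usual motivation: with sc_align_tweak == 2, the 14-byte
	 * Ethernet header makes the IP header start on a 4-byte boundary
	 * (2 + 14 == 16).
	 */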
4424 
4425 	/* Cancel any pending I/O. */
4426 	wm_stop_locked(ifp, 0);
4427 
4428 	/* update statistics before reset */
4429 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4430 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4431 
4432 	/* Reset the chip to a known state. */
4433 	wm_reset(sc);
4434 
4435 	switch (sc->sc_type) {
4436 	case WM_T_82571:
4437 	case WM_T_82572:
4438 	case WM_T_82573:
4439 	case WM_T_82574:
4440 	case WM_T_82583:
4441 	case WM_T_80003:
4442 	case WM_T_ICH8:
4443 	case WM_T_ICH9:
4444 	case WM_T_ICH10:
4445 	case WM_T_PCH:
4446 	case WM_T_PCH2:
4447 	case WM_T_PCH_LPT:
4448 		if (wm_check_mng_mode(sc) != 0)
4449 			wm_get_hw_control(sc);
4450 		break;
4451 	default:
4452 		break;
4453 	}
4454 
4455 	/* Init hardware bits */
4456 	wm_initialize_hardware_bits(sc);
4457 
4458 	/* Reset the PHY. */
4459 	if (sc->sc_flags & WM_F_HAS_MII)
4460 		wm_gmii_reset(sc);
4461 
4462 	/* Calculate (E)ITR value */
4463 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4464 		sc->sc_itr = 450;	/* For EITR */
4465 	} else if (sc->sc_type >= WM_T_82543) {
4466 		/*
4467 		 * Set up the interrupt throttling register (units of 256ns)
4468 		 * Note that a footnote in Intel's documentation says this
4469 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4470 		 * or 10Mbit mode.  Empirically, it appears to be the case
4471 		 * that that is also true for the 1024ns units of the other
4472 		 * interrupt-related timer registers -- so, really, we ought
4473 		 * to divide this value by 4 when the link speed is low.
4474 		 *
4475 		 * XXX implement this division at link speed change!
4476 		 */
4477 
4478 		/*
4479 		 * For N interrupts/sec, set this value to:
4480 		 * 1000000000 / (N * 256).  Note that we set the
4481 		 * absolute and packet timer values to this value
4482 		 * divided by 4 to get "simple timer" behavior.
4483 		 */
4484 
4485 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4486 	}
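	/*
	 * Checking the arithmetic above: sc_itr == 1500 in 256ns units
	 * is one interrupt per 1500 * 256ns = 384us, i.e. roughly
	 * 1000000000 / (1500 * 256) = 2604 interrupts/sec.
	 */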
4487 
4488 	error = wm_init_txrx_queues(sc);
4489 	if (error)
4490 		goto out;
4491 
4492 	/*
4493 	 * Clear out the VLAN table -- we don't use it (yet).
4494 	 */
4495 	CSR_WRITE(sc, WMREG_VET, 0);
4496 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4497 		trynum = 10; /* Due to hw errata */
4498 	else
4499 		trynum = 1;
4500 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4501 		for (j = 0; j < trynum; j++)
4502 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4503 
4504 	/*
4505 	 * Set up flow-control parameters.
4506 	 *
4507 	 * XXX Values could probably stand some tuning.
4508 	 */
4509 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4510 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4511 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4512 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4513 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4514 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4515 	}
4516 
4517 	sc->sc_fcrtl = FCRTL_DFLT;
4518 	if (sc->sc_type < WM_T_82543) {
4519 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4520 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4521 	} else {
4522 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4523 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4524 	}
4525 
4526 	if (sc->sc_type == WM_T_80003)
4527 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4528 	else
4529 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4530 
4531 	/* Write the control register. */
4532 	wm_set_vlan(sc);
4533 
4534 	if (sc->sc_flags & WM_F_HAS_MII) {
4535 		int val;
4536 
4537 		switch (sc->sc_type) {
4538 		case WM_T_80003:
4539 		case WM_T_ICH8:
4540 		case WM_T_ICH9:
4541 		case WM_T_ICH10:
4542 		case WM_T_PCH:
4543 		case WM_T_PCH2:
4544 		case WM_T_PCH_LPT:
4545 			/*
4546 			 * Set the mac to wait the maximum time between each
4547 			 * iteration and increase the max iterations when
4548 			 * polling the phy; this fixes erroneous timeouts at
4549 			 * 10Mbps.
4550 			 */
4551 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4552 			    0xFFFF);
4553 			val = wm_kmrn_readreg(sc,
4554 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4555 			val |= 0x3F;
4556 			wm_kmrn_writereg(sc,
4557 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4558 			break;
4559 		default:
4560 			break;
4561 		}
4562 
4563 		if (sc->sc_type == WM_T_80003) {
4564 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4565 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4566 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4567 
4568 			/* Bypass the RX and TX FIFOs */
4569 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4570 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4571 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4572 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4573 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4574 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4575 		}
4576 	}
4577 #if 0
4578 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4579 #endif
4580 
4581 	/* Set up checksum offload parameters. */
4582 	reg = CSR_READ(sc, WMREG_RXCSUM);
4583 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4584 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4585 		reg |= RXCSUM_IPOFL;
4586 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4587 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4588 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4589 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4590 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4591 
4592 	/* Set up MSI-X */
4593 	if (sc->sc_nintrs > 1) {
4594 		uint32_t ivar;
4595 
4596 		if (sc->sc_type == WM_T_82575) {
4597 			/* Interrupt control */
4598 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4599 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4600 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4601 
4602 			/* TX */
4603 			for (i = 0; i < sc->sc_ntxqueues; i++) {
4604 				struct wm_txqueue *txq = &sc->sc_txq[i];
4605 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4606 				    EITR_TX_QUEUE(txq->txq_id));
4607 			}
4608 			/* RX */
4609 			for (i = 0; i < sc->sc_nrxqueues; i++) {
4610 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4611 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4612 				    EITR_RX_QUEUE(rxq->rxq_id));
4613 			}
4614 			/* Link status */
4615 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4616 			    EITR_OTHER);
4617 		} else if (sc->sc_type == WM_T_82574) {
4618 			/* Interrupt control */
4619 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4620 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4621 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4622 
4623 			ivar = 0;
4624 			/* TX */
4625 			for (i = 0; i < sc->sc_ntxqueues; i++) {
4626 				struct wm_txqueue *txq = &sc->sc_txq[i];
4627 				ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
4628 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
4629 			}
4630 			/* RX */
4631 			for (i = 0; i < sc->sc_nrxqueues; i++) {
4632 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4633 				ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
4634 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4635 			}
4636 			/* Link status */
4637 			ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
4638 			    IVAR_OTHER_MASK);
4639 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4640 		} else {
4641 			/* Interrupt control */
4642 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
4643 			    | GPIE_MULTI_MSIX | GPIE_EIAME
4644 			    | GPIE_PBA);
4645 
4646 			switch (sc->sc_type) {
4647 			case WM_T_82580:
4648 			case WM_T_I350:
4649 			case WM_T_I354:
4650 			case WM_T_I210:
4651 			case WM_T_I211:
4652 				/* TX */
4653 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4654 					struct wm_txqueue *txq = &sc->sc_txq[i];
4655 					int qid = txq->txq_id;
4656 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4657 					ivar &= ~IVAR_TX_MASK_Q(qid);
4658 					ivar |= __SHIFTIN(
4659 						(txq->txq_intr_idx | IVAR_VALID),
4660 						IVAR_TX_MASK_Q(qid));
4661 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4662 				}
4663 
4664 				/* RX */
4665 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4666 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4667 					int qid = rxq->rxq_id;
4668 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4669 					ivar &= ~IVAR_RX_MASK_Q(qid);
4670 					ivar |= __SHIFTIN(
4671 						(rxq->rxq_intr_idx | IVAR_VALID),
4672 						IVAR_RX_MASK_Q(qid));
4673 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4674 				}
4675 				break;
4676 			case WM_T_82576:
4677 				/* TX */
4678 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4679 					struct wm_txqueue *txq = &sc->sc_txq[i];
4680 					int qid = txq->txq_id;
4681 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4682 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4683 					ivar |= __SHIFTIN(
4684 						(txq->txq_intr_idx | IVAR_VALID),
4685 						IVAR_TX_MASK_Q_82576(qid));
4686 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4687 				}
4688 
4689 				/* RX */
4690 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4691 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4692 					int qid = rxq->rxq_id;
4693 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
4694 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4695 					ivar |= __SHIFTIN(
4696 						(rxq->rxq_intr_idx | IVAR_VALID),
4697 						IVAR_RX_MASK_Q_82576(qid));
4698 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
4699 				}
4700 				break;
4701 			default:
4702 				break;
4703 			}
4704 
4705 			/* Link status */
4706 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4707 			    IVAR_MISC_OTHER);
4708 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4709 		}
4710 
4711 		if (sc->sc_nrxqueues > 1) {
4712 			wm_init_rss(sc);
4713 
4714 			/*
4715 			 * NOTE: Receive Full-Packet Checksum Offload
4716 			 * is mutually exclusive with Multiqueue. However,
4717 			 * this is not the same as TCP/IP checksum offload,
4718 			 * which still works.
4719 			 */
4720 			reg = CSR_READ(sc, WMREG_RXCSUM);
4721 			reg |= RXCSUM_PCSD;
4722 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
4723 		}
4724 	}
4725 
4726 	/* Set up the interrupt registers. */
4727 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4728 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4729 	    ICR_RXO | ICR_RXT0;
4730 	if (sc->sc_nintrs > 1) {
4731 		uint32_t mask;
4732 		switch (sc->sc_type) {
4733 		case WM_T_82574:
4734 			CSR_WRITE(sc, WMREG_EIAC_82574,
4735 			    WMREG_EIAC_82574_MSIX_MASK);
4736 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4737 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4738 			break;
4739 		default:
4740 			if (sc->sc_type == WM_T_82575) {
4741 				mask = 0;
4742 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4743 					struct wm_txqueue *txq = &sc->sc_txq[i];
4744 					mask |= EITR_TX_QUEUE(txq->txq_id);
4745 				}
4746 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4747 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4748 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
4749 				}
4750 				mask |= EITR_OTHER;
4751 			} else {
4752 				mask = 0;
4753 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4754 					struct wm_txqueue *txq = &sc->sc_txq[i];
4755 					mask |= 1 << txq->txq_intr_idx;
4756 				}
4757 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4758 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
4759 					mask |= 1 << rxq->rxq_intr_idx;
4760 				}
4761 				mask |= 1 << sc->sc_link_intr_idx;
4762 			}
4763 			CSR_WRITE(sc, WMREG_EIAC, mask);
4764 			CSR_WRITE(sc, WMREG_EIAM, mask);
4765 			CSR_WRITE(sc, WMREG_EIMS, mask);
4766 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4767 			break;
4768 		}
4769 	} else
4770 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4771 
4772 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4773 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4774 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4775 		reg = CSR_READ(sc, WMREG_KABGTXD);
4776 		reg |= KABGTXD_BGSQLBIAS;
4777 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4778 	}
4779 
4780 	/* Set up the inter-packet gap. */
4781 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4782 
4783 	if (sc->sc_type >= WM_T_82543) {
4784 		/*
4785 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4786 		 * the multiqueue function with MSI-X.
4787 		 */
4788 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4789 			int qidx;
4790 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4791 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
4792 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4793 				    sc->sc_itr);
4794 			}
4795 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4796 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4797 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4798 				    sc->sc_itr);
4799 			}
4800 			/*
4801 			 * Link interrupts occur much less often than TX
4802 			 * and RX interrupts. So, we don't tune the
4803 			 * EITR(WM_MSIX_LINKINTR_IDX) value like
4804 			 * FreeBSD's if_igb.
4805 			 */
4806 		} else
4807 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4808 	}
4809 
4810 	/* Set the VLAN ethernetype. */
4811 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4812 
4813 	/*
4814 	 * Set up the transmit control register; we start out with
4815 	 * a collision distance suitable for FDX, but update it when
4816 	 * we resolve the media type.
4817 	 */
4818 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4819 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4820 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4821 	if (sc->sc_type >= WM_T_82571)
4822 		sc->sc_tctl |= TCTL_MULR;
4823 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4824 
4825 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4826 		/* Write TDT after TCTL.EN is set. See the document. */
4827 		CSR_WRITE(sc, WMREG_TDT(0), 0);
4828 	}
4829 
4830 	if (sc->sc_type == WM_T_80003) {
4831 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4832 		reg &= ~TCTL_EXT_GCEX_MASK;
4833 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4834 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4835 	}
4836 
4837 	/* Set the media. */
4838 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4839 		goto out;
4840 
4841 	/* Configure for OS presence */
4842 	wm_init_manageability(sc);
4843 
4844 	/*
4845 	 * Set up the receive control register; we actually program
4846 	 * the register when we set the receive filter.  Use multicast
4847 	 * address offset type 0.
4848 	 *
4849 	 * Only the i82544 has the ability to strip the incoming
4850 	 * CRC, so we don't enable that feature.
4851 	 */
4852 	sc->sc_mchash_type = 0;
4853 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4854 	    | RCTL_MO(sc->sc_mchash_type);
4855 
4856 	/*
4857 	 * The I350 has a bug where it always strips the CRC whether
4858 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
4859 	 */
4860 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4861 	    || (sc->sc_type == WM_T_I210))
4862 		sc->sc_rctl |= RCTL_SECRC;
4863 
4864 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4865 	    && (ifp->if_mtu > ETHERMTU)) {
4866 		sc->sc_rctl |= RCTL_LPE;
4867 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4868 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4869 	}
4870 
4871 	if (MCLBYTES == 2048) {
4872 		sc->sc_rctl |= RCTL_2k;
4873 	} else {
4874 		if (sc->sc_type >= WM_T_82543) {
4875 			switch (MCLBYTES) {
4876 			case 4096:
4877 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4878 				break;
4879 			case 8192:
4880 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4881 				break;
4882 			case 16384:
4883 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4884 				break;
4885 			default:
4886 				panic("wm_init: MCLBYTES %d unsupported",
4887 				    MCLBYTES);
4888 				break;
4889 			}
4890 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4891 	}
4892 
4893 	/* Set the receive filter. */
4894 	wm_set_filter(sc);
4895 
4896 	/* Enable ECC */
4897 	switch (sc->sc_type) {
4898 	case WM_T_82571:
4899 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4900 		reg |= PBA_ECC_CORR_EN;
4901 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4902 		break;
4903 	case WM_T_PCH_LPT:
4904 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4905 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4906 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4907 
4908 		reg = CSR_READ(sc, WMREG_CTRL);
4909 		reg |= CTRL_MEHE;
4910 		CSR_WRITE(sc, WMREG_CTRL, reg);
4911 		break;
4912 	default:
4913 		break;
4914 	}
4915 
4916 	/* On 575 and later set RDT only if RX enabled */
4917 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4918 		int qidx;
4919 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4920 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4921 			for (i = 0; i < WM_NRXDESC; i++) {
4922 				WM_RX_LOCK(rxq);
4923 				wm_init_rxdesc(rxq, i);
4924 				WM_RX_UNLOCK(rxq);
4926 			}
4927 		}
4928 	}
4929 
4930 	sc->sc_stopping = false;
4931 
4932 	/* Start the one second link check clock. */
4933 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4934 
4935 	/* ...all done! */
4936 	ifp->if_flags |= IFF_RUNNING;
4937 	ifp->if_flags &= ~IFF_OACTIVE;
4938 
4939  out:
4940 	sc->sc_if_flags = ifp->if_flags;
4941 	if (error)
4942 		log(LOG_ERR, "%s: interface not running\n",
4943 		    device_xname(sc->sc_dev));
4944 	return error;
4945 }
4946 
4947 /*
4948  * wm_stop:		[ifnet interface function]
4949  *
4950  *	Stop transmission on the interface.
4951  */
4952 static void
4953 wm_stop(struct ifnet *ifp, int disable)
4954 {
4955 	struct wm_softc *sc = ifp->if_softc;
4956 
4957 	WM_CORE_LOCK(sc);
4958 	wm_stop_locked(ifp, disable);
4959 	WM_CORE_UNLOCK(sc);
4960 }
4961 
4962 static void
4963 wm_stop_locked(struct ifnet *ifp, int disable)
4964 {
4965 	struct wm_softc *sc = ifp->if_softc;
4966 	struct wm_txsoft *txs;
4967 	int i, qidx;
4968 
4969 	KASSERT(WM_CORE_LOCKED(sc));
4970 
4971 	sc->sc_stopping = true;
4972 
4973 	/* Stop the one second clock. */
4974 	callout_stop(&sc->sc_tick_ch);
4975 
4976 	/* Stop the 82547 Tx FIFO stall check timer. */
4977 	if (sc->sc_type == WM_T_82547)
4978 		callout_stop(&sc->sc_txfifo_ch);
4979 
4980 	if (sc->sc_flags & WM_F_HAS_MII) {
4981 		/* Down the MII. */
4982 		mii_down(&sc->sc_mii);
4983 	} else {
4984 #if 0
4985 		/* Should we clear PHY's status properly? */
4986 		wm_reset(sc);
4987 #endif
4988 	}
4989 
4990 	/* Stop the transmit and receive processes. */
4991 	CSR_WRITE(sc, WMREG_TCTL, 0);
4992 	CSR_WRITE(sc, WMREG_RCTL, 0);
4993 	sc->sc_rctl &= ~RCTL_EN;
4994 
4995 	/*
4996 	 * Clear the interrupt mask to ensure the device cannot assert its
4997 	 * interrupt line.
4998 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
4999 	 * service any currently pending or shared interrupt.
5000 	 */
5001 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5002 	sc->sc_icr = 0;
5003 	if (sc->sc_nintrs > 1) {
5004 		if (sc->sc_type != WM_T_82574) {
5005 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5006 			CSR_WRITE(sc, WMREG_EIAC, 0);
5007 		} else
5008 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5009 	}
5010 
5011 	/* Release any queued transmit buffers. */
5012 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5013 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
5014 		WM_TX_LOCK(txq);
5015 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5016 			txs = &txq->txq_soft[i];
5017 			if (txs->txs_mbuf != NULL) {
5018 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5019 				m_freem(txs->txs_mbuf);
5020 				txs->txs_mbuf = NULL;
5021 			}
5022 		}
5023 		WM_TX_UNLOCK(txq);
5024 	}
5025 
5026 	/* Mark the interface as down and cancel the watchdog timer. */
5027 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5028 	ifp->if_timer = 0;
5029 
5030 	if (disable) {
5031 		for (i = 0; i < sc->sc_nrxqueues; i++) {
5032 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5033 			WM_RX_LOCK(rxq);
5034 			wm_rxdrain(rxq);
5035 			WM_RX_UNLOCK(rxq);
5036 		}
5037 	}
5038 
5039 #if 0 /* notyet */
5040 	if (sc->sc_type >= WM_T_82544)
5041 		CSR_WRITE(sc, WMREG_WUC, 0);
5042 #endif
5043 }
5044 
5045 static void
5046 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5047 {
5048 	struct mbuf *m;
5049 	int i;
5050 
5051 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5052 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5053 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5054 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5055 		    m->m_data, m->m_len, m->m_flags);
5056 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5057 	    i, i == 1 ? "" : "s");
5058 }
5059 
5060 /*
5061  * wm_82547_txfifo_stall:
5062  *
5063  *	Callout used to wait for the 82547 Tx FIFO to drain,
5064  *	reset the FIFO pointers, and restart packet transmission.
5065  */
5066 static void
5067 wm_82547_txfifo_stall(void *arg)
5068 {
5069 	struct wm_softc *sc = arg;
5070 	struct wm_txqueue *txq = sc->sc_txq;
5071 #ifndef WM_MPSAFE
5072 	int s;
5073 
5074 	s = splnet();
5075 #endif
5076 	WM_TX_LOCK(txq);
5077 
5078 	if (sc->sc_stopping)
5079 		goto out;
5080 
5081 	if (txq->txq_fifo_stall) {
5082 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5083 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5084 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5085 			/*
5086 			 * Packets have drained.  Stop transmitter, reset
5087 			 * FIFO pointers, restart transmitter, and kick
5088 			 * the packet queue.
5089 			 */
5090 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5091 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5092 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5093 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5094 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5095 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5096 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5097 			CSR_WRITE_FLUSH(sc);
5098 
5099 			txq->txq_fifo_head = 0;
5100 			txq->txq_fifo_stall = 0;
5101 			wm_start_locked(&sc->sc_ethercom.ec_if);
5102 		} else {
5103 			/*
5104 			 * Still waiting for packets to drain; try again in
5105 			 * another tick.
5106 			 */
5107 			callout_schedule(&sc->sc_txfifo_ch, 1);
5108 		}
5109 	}
5110 
5111 out:
5112 	WM_TX_UNLOCK(txq);
5113 #ifndef WM_MPSAFE
5114 	splx(s);
5115 #endif
5116 }
5117 
5118 /*
5119  * wm_82547_txfifo_bugchk:
5120  *
5121  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5122  *	prevent enqueueing a packet that would wrap around the end
5123  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5124  *
5125  *	We do this by checking the amount of space before the end
5126  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5127  *	the Tx FIFO, wait for all remaining packets to drain, reset
5128  *	the internal FIFO pointers to the beginning, and restart
5129  *	transmission on the interface.
5130  */
5131 #define	WM_FIFO_HDR		0x10
5132 #define	WM_82547_PAD_LEN	0x3e0
5133 static int
5134 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5135 {
5136 	struct wm_txqueue *txq = &sc->sc_txq[0];
5137 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5138 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
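	/*
	 * e.g. a full-sized 1514-byte frame occupies
	 * roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space.
	 */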
5139 
5140 	/* Just return if already stalled. */
5141 	if (txq->txq_fifo_stall)
5142 		return 1;
5143 
5144 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5145 		/* Stall only occurs in half-duplex mode. */
5146 		goto send_packet;
5147 	}
5148 
5149 	if (len >= WM_82547_PAD_LEN + space) {
5150 		txq->txq_fifo_stall = 1;
5151 		callout_schedule(&sc->sc_txfifo_ch, 1);
5152 		return 1;
5153 	}
5154 
5155  send_packet:
5156 	txq->txq_fifo_head += len;
5157 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5158 		txq->txq_fifo_head -= txq->txq_fifo_size;
5159 
5160 	return 0;
5161 }
5162 
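/*
 * wm_alloc_tx_descs:
 *
 *	Allocate the transmit descriptor ring: allocate DMA-safe memory,
 *	map it into kernel virtual address space, and create and load
 *	the DMA map for it.
 */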
5163 static int
5164 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5165 {
5166 	int error;
5167 
5168 	/*
5169 	 * Allocate the control data structures, and create and load the
5170 	 * DMA map for it.
5171 	 *
5172 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5173 	 * memory.  So must Rx descriptors.  We simplify by allocating
5174 	 * both sets within the same 4G segment.
5175 	 */
5176 	if (sc->sc_type < WM_T_82544) {
5177 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5178 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
5179 	} else {
5180 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5181 		txq->txq_desc_size = sizeof(txdescs_t);
5182 	}
5183 
5184 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
5185 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
5186 		    &txq->txq_desc_rseg, 0)) != 0) {
5187 		aprint_error_dev(sc->sc_dev,
5188 		    "unable to allocate TX control data, error = %d\n",
5189 		    error);
5190 		goto fail_0;
5191 	}
5192 
5193 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5194 		    txq->txq_desc_rseg, txq->txq_desc_size,
5195 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5196 		aprint_error_dev(sc->sc_dev,
5197 		    "unable to map TX control data, error = %d\n", error);
5198 		goto fail_1;
5199 	}
5200 
5201 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
5202 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
5203 		aprint_error_dev(sc->sc_dev,
5204 		    "unable to create TX control data DMA map, error = %d\n",
5205 		    error);
5206 		goto fail_2;
5207 	}
5208 
5209 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5210 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
5211 		aprint_error_dev(sc->sc_dev,
5212 		    "unable to load TX control data DMA map, error = %d\n",
5213 		    error);
5214 		goto fail_3;
5215 	}
5216 
5217 	return 0;
5218 
5219  fail_3:
5220 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5221  fail_2:
5222 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5223 	    txq->txq_desc_size);
5224  fail_1:
5225 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5226  fail_0:
5227 	return error;
5228 }
5229 
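/*
 * wm_free_tx_descs:
 *
 *	Undo wm_alloc_tx_descs(): unload and destroy the DMA map, then
 *	unmap and free the descriptor memory.
 */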
5230 static void
5231 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5232 {
5233 
5234 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5235 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5236 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5237 	    txq->txq_desc_size);
5238 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5239 }
5240 
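/*
 * wm_alloc_rx_descs:
 *
 *	Allocate the receive descriptor ring, in the same way as
 *	wm_alloc_tx_descs().
 */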
5241 static int
5242 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5243 {
5244 	int error;
5245 
5246 	/*
5247 	 * Allocate the control data structures, and create and load the
5248 	 * DMA map for it.
5249 	 *
5250 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5251 	 * memory.  So must Rx descriptors.  We simplify by allocating
5252 	 * both sets within the same 4G segment.
5253 	 */
5254 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5255 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
5256 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
5257 		    &rxq->rxq_desc_rseg, 0)) != 0) {
5258 		aprint_error_dev(sc->sc_dev,
5259 		    "unable to allocate RX control data, error = %d\n",
5260 		    error);
5261 		goto fail_0;
5262 	}
5263 
5264 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5265 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5266 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5267 		aprint_error_dev(sc->sc_dev,
5268 		    "unable to map RX control data, error = %d\n", error);
5269 		goto fail_1;
5270 	}
5271 
5272 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5273 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5274 		aprint_error_dev(sc->sc_dev,
5275 		    "unable to create RX control data DMA map, error = %d\n",
5276 		    error);
5277 		goto fail_2;
5278 	}
5279 
5280 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5281 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5282 		aprint_error_dev(sc->sc_dev,
5283 		    "unable to load RX control data DMA map, error = %d\n",
5284 		    error);
5285 		goto fail_3;
5286 	}
5287 
5288 	return 0;
5289 
5290  fail_3:
5291 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5292  fail_2:
5293 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5294 	    rxq->rxq_desc_size);
5295  fail_1:
5296 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5297  fail_0:
5298 	return error;
5299 }
5300 
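/*
 * wm_free_rx_descs:
 *
 *	Free the receive descriptor ring and its DMA resources.
 */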
5301 static void
5302 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5303 {
5304 
5305 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5306 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5307 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5308 	    rxq->rxq_desc_size);
5309 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5310 }
5311 
5312 
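/*
 * wm_alloc_tx_buffer:
 *
 *	Create the per-job DMA maps used to load outgoing mbuf chains.
 *	The 82547 and 82547_2 use a shorter job queue.
 */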
5313 static int
5314 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5315 {
5316 	int i, error;
5317 
5318 	/* Create the transmit buffer DMA maps. */
5319 	WM_TXQUEUELEN(txq) =
5320 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5321 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5322 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5323 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5324 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5325 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5326 			aprint_error_dev(sc->sc_dev,
5327 			    "unable to create Tx DMA map %d, error = %d\n",
5328 			    i, error);
5329 			goto fail;
5330 		}
5331 	}
5332 
5333 	return 0;
5334 
5335  fail:
5336 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5337 		if (txq->txq_soft[i].txs_dmamap != NULL)
5338 			bus_dmamap_destroy(sc->sc_dmat,
5339 			    txq->txq_soft[i].txs_dmamap);
5340 	}
5341 	return error;
5342 }
5343 
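/*
 * wm_free_tx_buffer:
 *
 *	Destroy the transmit buffer DMA maps.
 */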
5344 static void
5345 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5346 {
5347 	int i;
5348 
5349 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5350 		if (txq->txq_soft[i].txs_dmamap != NULL)
5351 			bus_dmamap_destroy(sc->sc_dmat,
5352 			    txq->txq_soft[i].txs_dmamap);
5353 	}
5354 }
5355 
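/*
 * wm_alloc_rx_buffer:
 *
 *	Create one DMA map per receive descriptor, each large enough
 *	for a single mbuf cluster.
 */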
5356 static int
5357 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5358 {
5359 	int i, error;
5360 
5361 	/* Create the receive buffer DMA maps. */
5362 	for (i = 0; i < WM_NRXDESC; i++) {
5363 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5364 			    MCLBYTES, 0, 0,
5365 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5366 			aprint_error_dev(sc->sc_dev,
5367 			    "unable to create Rx DMA map %d error = %d\n",
5368 			    i, error);
5369 			goto fail;
5370 		}
5371 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5372 	}
5373 
5374 	return 0;
5375 
5376  fail:
5377 	for (i = 0; i < WM_NRXDESC; i++) {
5378 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5379 			bus_dmamap_destroy(sc->sc_dmat,
5380 			    rxq->rxq_soft[i].rxs_dmamap);
5381 	}
5382 	return error;
5383 }
5384 
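/*
 * wm_free_rx_buffer:
 *
 *	Destroy the receive buffer DMA maps.
 */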
5385 static void
5386 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5387 {
5388 	int i;
5389 
5390 	for (i = 0; i < WM_NRXDESC; i++) {
5391 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5392 			bus_dmamap_destroy(sc->sc_dmat,
5393 			    rxq->rxq_soft[i].rxs_dmamap);
5394 	}
5395 }
5396 
5397 /*
5398  * wm_alloc_txrx_queues:
5399  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
5400  */
5401 static int
5402 wm_alloc_txrx_queues(struct wm_softc *sc)
5403 {
5404 	int i, error, tx_done, rx_done;
5405 
5406 	/*
5407 	 * For transmission
5408 	 */
5409 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5410 	    KM_SLEEP);
5411 	if (sc->sc_txq == NULL) {
5412 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
5413 		error = ENOMEM;
5414 		goto fail_0;
5415 	}
5416 
5417 	error = 0;
5418 	tx_done = 0;
5419 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5420 		struct wm_txqueue *txq = &sc->sc_txq[i];
5421 		txq->txq_sc = sc;
5422 #ifdef WM_MPSAFE
5423 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5424 #else
5425 		txq->txq_lock = NULL;
5426 #endif
5427 		error = wm_alloc_tx_descs(sc, txq);
5428 		if (error)
5429 			break;
5430 		error = wm_alloc_tx_buffer(sc, txq);
5431 		if (error) {
5432 			wm_free_tx_descs(sc, txq);
5433 			break;
5434 		}
5435 		tx_done++;
5436 	}
5437 	if (error)
5438 		goto fail_1;
5439 
5440 	/*
5441 	 * For receive
5442 	 */
5443 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5444 	    KM_SLEEP);
5445 	if (sc->sc_rxq == NULL) {
5446 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
5447 		error = ENOMEM;
5448 		goto fail_1;
5449 	}
5450 
5451 	error = 0;
5452 	rx_done = 0;
5453 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5454 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5455 		rxq->rxq_sc = sc;
5456 #ifdef WM_MPSAFE
5457 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5458 #else
5459 		rxq->rxq_lock = NULL;
5460 #endif
5461 		error = wm_alloc_rx_descs(sc, rxq);
5462 		if (error)
5463 			break;
5464 
5465 		error = wm_alloc_rx_buffer(sc, rxq);
5466 		if (error) {
5467 			wm_free_rx_descs(sc, rxq);
5468 			break;
5469 		}
5470 
5471 		rx_done++;
5472 	}
5473 	if (error)
5474 		goto fail_2;
5475 
5476 	return 0;
5477 
5478  fail_2:
5479 	for (i = 0; i < rx_done; i++) {
5480 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5481 		wm_free_rx_buffer(sc, rxq);
5482 		wm_free_rx_descs(sc, rxq);
5483 		if (rxq->rxq_lock)
5484 			mutex_obj_free(rxq->rxq_lock);
5485 	}
5486 	kmem_free(sc->sc_rxq,
5487 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5488  fail_1:
5489 	for (i = 0; i < tx_done; i++) {
5490 		struct wm_txqueue *txq = &sc->sc_txq[i];
5491 		wm_free_tx_buffer(sc, txq);
5492 		wm_free_tx_descs(sc, txq);
5493 		if (txq->txq_lock)
5494 			mutex_obj_free(txq->txq_lock);
5495 	}
5496 	kmem_free(sc->sc_txq,
5497 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5498  fail_0:
5499 	return error;
5500 }
5501 
5502 /*
5503  * wm_free_txrx_queues:
5504  *	Free {tx,rx} descriptors and {tx,rx} buffers.
5505  */
5506 static void
5507 wm_free_txrx_queues(struct wm_softc *sc)
5508 {
5509 	int i;
5510 
5511 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5512 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5513 		wm_free_rx_buffer(sc, rxq);
5514 		wm_free_rx_descs(sc, rxq);
5515 		if (rxq->rxq_lock)
5516 			mutex_obj_free(rxq->rxq_lock);
5517 	}
5518 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5519 
5520 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5521 		struct wm_txqueue *txq = &sc->sc_txq[i];
5522 		wm_free_tx_buffer(sc, txq);
5523 		wm_free_tx_descs(sc, txq);
5524 		if (txq->txq_lock)
5525 			mutex_obj_free(txq->txq_lock);
5526 	}
5527 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5528 }
5529 
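/*
 * wm_init_tx_descs:
 *
 *	Zero the transmit descriptor ring, sync it to the device, and
 *	reset the software ring pointers.  Called with the Tx lock held.
 */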
5530 static void
5531 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5532 {
5533 
5534 	KASSERT(WM_TX_LOCKED(txq));
5535 
5536 	/* Initialize the transmit descriptor ring. */
5537 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5538 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5539 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5540 	txq->txq_free = WM_NTXDESC(txq);
5541 	txq->txq_next = 0;
5542 }
5543 
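/*
 * wm_init_tx_regs:
 *
 *	Program the transmit ring base, length, head/tail, and interrupt
 *	delay registers.  Pre-82543 chips use the old register layout.
 */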
5544 static void
5545 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5546 {
5547 
5548 	KASSERT(WM_TX_LOCKED(txq));
5549 
5550 	if (sc->sc_type < WM_T_82543) {
5551 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5552 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5553 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5554 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5555 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5556 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5557 	} else {
5558 		int qid = txq->txq_id;
5559 
5560 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5561 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5562 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
5563 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5564 
5565 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5566 			/*
5567 			 * Don't write TDT before TCTL.EN is set.
5568 			 * See the documentation.
5569 			 */
5570 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5571 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5572 			    | TXDCTL_WTHRESH(0));
5573 		else {
5574 			/* ITR / 4 */
5575 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5576 			if (sc->sc_type >= WM_T_82540) {
5577 				/* Should be the same as TIDV */
5578 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5579 			}
5580 
5581 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5582 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5583 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5584 		}
5585 	}
5586 }
5587 
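/*
 * wm_init_tx_buffer:
 *
 *	Reset the transmit job descriptors to the empty state.
 */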
5588 static void
5589 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5590 {
5591 	int i;
5592 
5593 	KASSERT(WM_TX_LOCKED(txq));
5594 
5595 	/* Initialize the transmit job descriptors. */
5596 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5597 		txq->txq_soft[i].txs_mbuf = NULL;
5598 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5599 	txq->txq_snext = 0;
5600 	txq->txq_sdirty = 0;
5601 }
5602 
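/*
 * wm_init_tx_queue:
 *
 *	Initialize one transmit queue: descriptors, registers, and
 *	software state.
 */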
5603 static void
5604 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5605 {
5606 
5607 	KASSERT(WM_TX_LOCKED(txq));
5608 
5609 	/*
5610 	 * Set up some register offsets that are different between
5611 	 * the i82542 and the i82543 and later chips.
5612 	 */
5613 	if (sc->sc_type < WM_T_82543) {
5614 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5615 	} else {
5616 		txq->txq_tdt_reg = WMREG_TDT(0);
5617 	}
5618 
5619 	wm_init_tx_descs(sc, txq);
5620 	wm_init_tx_regs(sc, txq);
5621 	wm_init_tx_buffer(sc, txq);
5622 }
5623 
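/*
 * wm_init_rx_regs:
 *
 *	Program the receive ring registers, including the SRRCTL and
 *	RXDCTL setup used by the newer (82575 and later) queues.
 */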
5624 static void
5625 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5626 {
5627 
5628 	KASSERT(WM_RX_LOCKED(rxq));
5629 
5630 	/*
5631 	 * Initialize the receive descriptor and receive job
5632 	 * descriptor rings.
5633 	 */
5634 	if (sc->sc_type < WM_T_82543) {
5635 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5636 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5637 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5638 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5639 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5640 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5641 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5642 
5643 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5644 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5645 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5646 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5647 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5648 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5649 	} else {
5650 		int qid = rxq->rxq_id;
5651 
5652 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5653 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5654 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5655 
5656 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5657 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5658 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5659 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5660 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5661 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5662 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5663 			    | RXDCTL_WTHRESH(1));
5664 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5665 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5666 		} else {
5667 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5668 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5669 			/* ITR / 4 */
5670 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5671 			/* MUST be the same as RDTR */
5672 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5673 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5674 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5675 		}
5676 	}
5677 }
5678 
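/*
 * wm_init_rx_buffer:
 *
 *	Populate the receive ring with mbuf clusters and reset the
 *	receiver's software state.
 */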
5679 static int
5680 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5681 {
5682 	struct wm_rxsoft *rxs;
5683 	int error, i;
5684 
5685 	KASSERT(WM_RX_LOCKED(rxq));
5686 
5687 	for (i = 0; i < WM_NRXDESC; i++) {
5688 		rxs = &rxq->rxq_soft[i];
5689 		if (rxs->rxs_mbuf == NULL) {
5690 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5691 				log(LOG_ERR, "%s: unable to allocate or map "
5692 				    "rx buffer %d, error = %d\n",
5693 				    device_xname(sc->sc_dev), i, error);
5694 				/*
5695 				 * XXX Should attempt to run with fewer receive
5696 				 * XXX buffers instead of just failing.
5697 				 */
5698 				wm_rxdrain(rxq);
5699 				return ENOMEM;
5700 			}
5701 		} else {
5702 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5703 				wm_init_rxdesc(rxq, i);
5704 			/*
5705 			 * For 82575 and newer devices, the Rx descriptors
5706 			 * must be initialized after RCTL.EN is set in
5707 			 * wm_set_filter().
5708 			 */
5709 		}
5710 	}
5711 	rxq->rxq_ptr = 0;
5712 	rxq->rxq_discard = 0;
5713 	WM_RXCHAIN_RESET(rxq);
5714 
5715 	return 0;
5716 }
5717 
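/*
 * wm_init_rx_queue:
 *
 *	Initialize one receive queue: registers and buffers.
 */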
5718 static int
5719 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5720 {
5721 
5722 	KASSERT(WM_RX_LOCKED(rxq));
5723 
5724 	/*
5725 	 * Set up some register offsets that are different between
5726 	 * the i82542 and the i82543 and later chips.
5727 	 */
5728 	if (sc->sc_type < WM_T_82543) {
5729 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5730 	} else {
5731 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5732 	}
5733 
5734 	wm_init_rx_regs(sc, rxq);
5735 	return wm_init_rx_buffer(sc, rxq);
5736 }
5737 
5738 /*
5739  * wm_init_txrx_queues:
5740  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
5741  */
5742 static int
5743 wm_init_txrx_queues(struct wm_softc *sc)
5744 {
5745 	int i, error;
5746 
5747 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5748 		struct wm_txqueue *txq = &sc->sc_txq[i];
5749 		WM_TX_LOCK(txq);
5750 		wm_init_tx_queue(sc, txq);
5751 		WM_TX_UNLOCK(txq);
5752 	}
5753 
5754 	error = 0;
5755 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5756 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5757 		WM_RX_LOCK(rxq);
5758 		error = wm_init_rx_queue(sc, rxq);
5759 		WM_RX_UNLOCK(rxq);
5760 		if (error)
5761 			break;
5762 	}
5763 
5764 	return error;
5765 }
5766 
5767 /*
5768  * wm_tx_offload:
5769  *
5770  *	Set up TCP/IP checksumming parameters for the
5771  *	specified packet.
5772  */
5773 static int
5774 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5775     uint8_t *fieldsp)
5776 {
5777 	struct wm_txqueue *txq = &sc->sc_txq[0];
5778 	struct mbuf *m0 = txs->txs_mbuf;
5779 	struct livengood_tcpip_ctxdesc *t;
5780 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
5781 	uint32_t ipcse;
5782 	struct ether_header *eh;
5783 	int offset, iphl;
5784 	uint8_t fields;
5785 
5786 	/*
5787 	 * XXX It would be nice if the mbuf pkthdr had offset
5788 	 * fields for the protocol headers.
5789 	 */
5790 
5791 	eh = mtod(m0, struct ether_header *);
5792 	switch (htons(eh->ether_type)) {
5793 	case ETHERTYPE_IP:
5794 	case ETHERTYPE_IPV6:
5795 		offset = ETHER_HDR_LEN;
5796 		break;
5797 
5798 	case ETHERTYPE_VLAN:
5799 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5800 		break;
5801 
5802 	default:
5803 		/*
5804 		 * Don't support this protocol or encapsulation.
5805 		 */
5806 		*fieldsp = 0;
5807 		*cmdp = 0;
5808 		return 0;
5809 	}
5810 
5811 	if ((m0->m_pkthdr.csum_flags &
5812 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
5813 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5814 	} else {
5815 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5816 	}
5817 	ipcse = offset + iphl - 1;
5818 
5819 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5820 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5821 	seg = 0;
5822 	fields = 0;
5823 
5824 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5825 		int hlen = offset + iphl;
5826 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5827 
5828 		if (__predict_false(m0->m_len <
5829 				    (hlen + sizeof(struct tcphdr)))) {
5830 			/*
5831 			 * TCP/IP headers are not in the first mbuf; we need
5832 			 * to do this the slow and painful way.  Let's just
5833 			 * hope this doesn't happen very often.
5834 			 */
5835 			struct tcphdr th;
5836 
5837 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5838 
5839 			m_copydata(m0, hlen, sizeof(th), &th);
5840 			if (v4) {
5841 				struct ip ip;
5842 
5843 				m_copydata(m0, offset, sizeof(ip), &ip);
5844 				ip.ip_len = 0;
5845 				m_copyback(m0,
5846 				    offset + offsetof(struct ip, ip_len),
5847 				    sizeof(ip.ip_len), &ip.ip_len);
5848 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5849 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5850 			} else {
5851 				struct ip6_hdr ip6;
5852 
5853 				m_copydata(m0, offset, sizeof(ip6), &ip6);
5854 				ip6.ip6_plen = 0;
5855 				m_copyback(m0,
5856 				    offset + offsetof(struct ip6_hdr, ip6_plen),
5857 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5858 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5859 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5860 			}
5861 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5862 			    sizeof(th.th_sum), &th.th_sum);
5863 
5864 			hlen += th.th_off << 2;
5865 		} else {
5866 			/*
5867 			 * TCP/IP headers are in the first mbuf; we can do
5868 			 * this the easy way.
5869 			 */
5870 			struct tcphdr *th;
5871 
5872 			if (v4) {
5873 				struct ip *ip =
5874 				    (void *)(mtod(m0, char *) + offset);
5875 				th = (void *)(mtod(m0, char *) + hlen);
5876 
5877 				ip->ip_len = 0;
5878 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5879 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5880 			} else {
5881 				struct ip6_hdr *ip6 =
5882 				    (void *)(mtod(m0, char *) + offset);
5883 				th = (void *)(mtod(m0, char *) + hlen);
5884 
5885 				ip6->ip6_plen = 0;
5886 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5887 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5888 			}
5889 			hlen += th->th_off << 2;
5890 		}
5891 
5892 		if (v4) {
5893 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
5894 			cmdlen |= WTX_TCPIP_CMD_IP;
5895 		} else {
5896 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5897 			ipcse = 0;
5898 		}
5899 		cmd |= WTX_TCPIP_CMD_TSE;
5900 		cmdlen |= WTX_TCPIP_CMD_TSE |
5901 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5902 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5903 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5904 	}
5905 
5906 	/*
5907 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5908 	 * offload feature, if we load the context descriptor, we
5909 	 * MUST provide valid values for IPCSS and TUCSS fields.
5910 	 */
5911 
5912 	ipcs = WTX_TCPIP_IPCSS(offset) |
5913 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5914 	    WTX_TCPIP_IPCSE(ipcse);
5915 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
5916 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5917 		fields |= WTX_IXSM;
5918 	}
5919 
5920 	offset += iphl;
5921 
5922 	if (m0->m_pkthdr.csum_flags &
5923 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
5924 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5925 		fields |= WTX_TXSM;
5926 		tucs = WTX_TCPIP_TUCSS(offset) |
5927 		    WTX_TCPIP_TUCSO(offset +
5928 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5929 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5930 	} else if ((m0->m_pkthdr.csum_flags &
5931 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
5932 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5933 		fields |= WTX_TXSM;
5934 		tucs = WTX_TCPIP_TUCSS(offset) |
5935 		    WTX_TCPIP_TUCSO(offset +
5936 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5937 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5938 	} else {
5939 		/* Just initialize it to a valid TCP context. */
5940 		tucs = WTX_TCPIP_TUCSS(offset) |
5941 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5942 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5943 	}
5944 
5945 	/* Fill in the context descriptor. */
5946 	t = (struct livengood_tcpip_ctxdesc *)
5947 	    &txq->txq_descs[txq->txq_next];
5948 	t->tcpip_ipcs = htole32(ipcs);
5949 	t->tcpip_tucs = htole32(tucs);
5950 	t->tcpip_cmdlen = htole32(cmdlen);
5951 	t->tcpip_seg = htole32(seg);
5952 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
5953 
5954 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5955 	txs->txs_ndesc++;
5956 
5957 	*cmdp = cmd;
5958 	*fieldsp = fields;
5959 
5960 	return 0;
5961 }
5962 
5963 /*
5964  * wm_start:		[ifnet interface function]
5965  *
5966  *	Start packet transmission on the interface.
5967  */
5968 static void
5969 wm_start(struct ifnet *ifp)
5970 {
5971 	struct wm_softc *sc = ifp->if_softc;
5972 	struct wm_txqueue *txq = &sc->sc_txq[0];
5973 
5974 	WM_TX_LOCK(txq);
5975 	if (!sc->sc_stopping)
5976 		wm_start_locked(ifp);
5977 	WM_TX_UNLOCK(txq);
5978 }
5979 
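/*
 * wm_start_locked:
 *
 *	The body of wm_start(); called with the Tx lock held.
 */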
5980 static void
5981 wm_start_locked(struct ifnet *ifp)
5982 {
5983 	struct wm_softc *sc = ifp->if_softc;
5984 	struct wm_txqueue *txq = &sc->sc_txq[0];
5985 	struct mbuf *m0;
5986 	struct m_tag *mtag;
5987 	struct wm_txsoft *txs;
5988 	bus_dmamap_t dmamap;
5989 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
5990 	bus_addr_t curaddr;
5991 	bus_size_t seglen, curlen;
5992 	uint32_t cksumcmd;
5993 	uint8_t cksumfields;
5994 
5995 	KASSERT(WM_TX_LOCKED(txq));
5996 
5997 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
5998 		return;
5999 
6000 	/* Remember the previous number of free descriptors. */
6001 	ofree = txq->txq_free;
6002 
6003 	/*
6004 	 * Loop through the send queue, setting up transmit descriptors
6005 	 * until we drain the queue, or use up all available transmit
6006 	 * descriptors.
6007 	 */
6008 	for (;;) {
6009 		m0 = NULL;
6010 
6011 		/* Get a work queue entry. */
6012 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6013 			wm_txeof(sc);
6014 			if (txq->txq_sfree == 0) {
6015 				DPRINTF(WM_DEBUG_TX,
6016 				    ("%s: TX: no free job descriptors\n",
6017 					device_xname(sc->sc_dev)));
6018 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6019 				break;
6020 			}
6021 		}
6022 
6023 		/* Grab a packet off the queue. */
6024 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6025 		if (m0 == NULL)
6026 			break;
6027 
6028 		DPRINTF(WM_DEBUG_TX,
6029 		    ("%s: TX: have packet to transmit: %p\n",
6030 		    device_xname(sc->sc_dev), m0));
6031 
6032 		txs = &txq->txq_soft[txq->txq_snext];
6033 		dmamap = txs->txs_dmamap;
6034 
6035 		use_tso = (m0->m_pkthdr.csum_flags &
6036 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6037 
6038 		/*
6039 		 * So says the Linux driver:
6040 		 * The controller does a simple calculation to make sure
6041 		 * there is enough room in the FIFO before initiating the
6042 		 * DMA for each buffer.  The calc is:
6043 		 *	4 = ceil(buffer len / MSS)
6044 		 * To make sure we don't overrun the FIFO, adjust the max
6045 		 * buffer len if the MSS drops.
6046 		 */
6047 		dmamap->dm_maxsegsz =
6048 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6049 		    ? m0->m_pkthdr.segsz << 2
6050 		    : WTX_MAX_LEN;
6051 
6052 		/*
6053 		 * Load the DMA map.  If this fails, the packet either
6054 		 * didn't fit in the allotted number of segments, or we
6055 		 * were short on resources.  For the too-many-segments
6056 		 * case, we simply report an error and drop the packet,
6057 		 * since we can't sanely copy a jumbo packet to a single
6058 		 * buffer.
6059 		 */
6060 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6061 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6062 		if (error) {
6063 			if (error == EFBIG) {
6064 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6065 				log(LOG_ERR, "%s: Tx packet consumes too many "
6066 				    "DMA segments, dropping...\n",
6067 				    device_xname(sc->sc_dev));
6068 				wm_dump_mbuf_chain(sc, m0);
6069 				m_freem(m0);
6070 				continue;
6071 			}
6072 			/* Short on resources, just stop for now. */
6073 			DPRINTF(WM_DEBUG_TX,
6074 			    ("%s: TX: dmamap load failed: %d\n",
6075 			    device_xname(sc->sc_dev), error));
6076 			break;
6077 		}
6078 
6079 		segs_needed = dmamap->dm_nsegs;
6080 		if (use_tso) {
6081 			/* For sentinel descriptor; see below. */
6082 			segs_needed++;
6083 		}
6084 
6085 		/*
6086 		 * Ensure we have enough descriptors free to describe
6087 		 * the packet.  Note, we always reserve one descriptor
6088 		 * at the end of the ring due to the semantics of the
6089 		 * TDT register, plus one more in the event we need
6090 		 * to load offload context.
6091 		 */
6092 		if (segs_needed > txq->txq_free - 2) {
6093 			/*
6094 			 * Not enough free descriptors to transmit this
6095 			 * packet.  We haven't committed anything yet,
6096 			 * so just unload the DMA map, put the packet
6097 			 * back on the queue, and punt.  Notify the upper
6098 			 * layer that there are no more slots left.
6099 			 */
6100 			DPRINTF(WM_DEBUG_TX,
6101 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6102 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6103 			    segs_needed, txq->txq_free - 1));
6104 			ifp->if_flags |= IFF_OACTIVE;
6105 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6106 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6107 			break;
6108 		}
6109 
6110 		/*
6111 		 * Check for 82547 Tx FIFO bug.  We need to do this
6112 		 * once we know we can transmit the packet, since we
6113 		 * do some internal FIFO space accounting here.
6114 		 */
6115 		if (sc->sc_type == WM_T_82547 &&
6116 		    wm_82547_txfifo_bugchk(sc, m0)) {
6117 			DPRINTF(WM_DEBUG_TX,
6118 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6119 			    device_xname(sc->sc_dev)));
6120 			ifp->if_flags |= IFF_OACTIVE;
6121 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6122 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6123 			break;
6124 		}
6125 
6126 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6127 
6128 		DPRINTF(WM_DEBUG_TX,
6129 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6130 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6131 
6132 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6133 
6134 		/*
6135 		 * Store a pointer to the packet so that we can free it
6136 		 * later.
6137 		 *
6138 		 * Initially, we consider the number of descriptors the
6139 		 * packet uses to be the number of DMA segments.  This may be
6140 		 * incremented by 1 if we do checksum offload (a descriptor
6141 		 * is used to set the checksum context).
6142 		 */
6143 		txs->txs_mbuf = m0;
6144 		txs->txs_firstdesc = txq->txq_next;
6145 		txs->txs_ndesc = segs_needed;
6146 
6147 		/* Set up offload parameters for this packet. */
6148 		if (m0->m_pkthdr.csum_flags &
6149 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
6150 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6151 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6152 			if (wm_tx_offload(sc, txs, &cksumcmd,
6153 					  &cksumfields) != 0) {
6154 				/* Error message already displayed. */
6155 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6156 				continue;
6157 			}
6158 		} else {
6159 			cksumcmd = 0;
6160 			cksumfields = 0;
6161 		}
6162 
6163 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6164 
6165 		/* Sync the DMA map. */
6166 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6167 		    BUS_DMASYNC_PREWRITE);
6168 
6169 		/* Initialize the transmit descriptor. */
6170 		for (nexttx = txq->txq_next, seg = 0;
6171 		     seg < dmamap->dm_nsegs; seg++) {
6172 			for (seglen = dmamap->dm_segs[seg].ds_len,
6173 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6174 			     seglen != 0;
6175 			     curaddr += curlen, seglen -= curlen,
6176 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6177 				curlen = seglen;
6178 
6179 				/*
6180 				 * So says the Linux driver:
6181 				 * Workaround for premature descriptor
6182 				 * write-backs in TSO mode.  Append a
6183 				 * 4-byte sentinel descriptor.
6184 				 */
6185 				if (use_tso &&
6186 				    seg == dmamap->dm_nsegs - 1 &&
6187 				    curlen > 8)
6188 					curlen -= 4;
6189 
6190 				wm_set_dma_addr(
6191 				    &txq->txq_descs[nexttx].wtx_addr,
6192 				    curaddr);
6193 				txq->txq_descs[nexttx].wtx_cmdlen =
6194 				    htole32(cksumcmd | curlen);
6195 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
6196 				    0;
6197 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
6198 				    cksumfields;
6199 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6200 				lasttx = nexttx;
6201 
6202 				DPRINTF(WM_DEBUG_TX,
6203 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6204 				     "len %#04zx\n",
6205 				    device_xname(sc->sc_dev), nexttx,
6206 				    (uint64_t)curaddr, curlen));
6207 			}
6208 		}
6209 
6210 		KASSERT(lasttx != -1);
6211 
6212 		/*
6213 		 * Set up the command byte on the last descriptor of
6214 		 * the packet.  If we're in the interrupt delay window,
6215 		 * delay the interrupt.
6216 		 */
6217 		txq->txq_descs[lasttx].wtx_cmdlen |=
6218 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6219 
6220 		/*
6221 		 * If VLANs are enabled and the packet has a VLAN tag, set
6222 		 * up the descriptor to encapsulate the packet for us.
6223 		 *
6224 		 * This is only valid on the last descriptor of the packet.
6225 		 */
6226 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6227 			txq->txq_descs[lasttx].wtx_cmdlen |=
6228 			    htole32(WTX_CMD_VLE);
6229 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6230 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6231 		}
6232 
6233 		txs->txs_lastdesc = lasttx;
6234 
6235 		DPRINTF(WM_DEBUG_TX,
6236 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6237 		    device_xname(sc->sc_dev),
6238 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6239 
6240 		/* Sync the descriptors we're using. */
6241 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6242 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6243 
6244 		/* Give the packet to the chip. */
6245 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6246 
6247 		DPRINTF(WM_DEBUG_TX,
6248 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6249 
6250 		DPRINTF(WM_DEBUG_TX,
6251 		    ("%s: TX: finished transmitting packet, job %d\n",
6252 		    device_xname(sc->sc_dev), txq->txq_snext));
6253 
6254 		/* Advance the tx pointer. */
6255 		txq->txq_free -= txs->txs_ndesc;
6256 		txq->txq_next = nexttx;
6257 
6258 		txq->txq_sfree--;
6259 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6260 
6261 		/* Pass the packet to any BPF listeners. */
6262 		bpf_mtap(ifp, m0);
6263 	}
6264 
6265 	if (m0 != NULL) {
6266 		ifp->if_flags |= IFF_OACTIVE;
6267 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6268 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6269 		m_freem(m0);
6270 	}
6271 
6272 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6273 		/* No more slots; notify upper layer. */
6274 		ifp->if_flags |= IFF_OACTIVE;
6275 	}
6276 
6277 	if (txq->txq_free != ofree) {
6278 		/* Set a watchdog timer in case the chip flakes out. */
6279 		ifp->if_timer = 5;
6280 	}
6281 }
6282 
6283 /*
6284  * wm_nq_tx_offload:
6285  *
6286  *	Set up TCP/IP checksumming parameters for the
6287  *	specified packet, for NEWQUEUE devices
6288  */
6289 static int
6290 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
6291     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6292 {
6293 	struct wm_txqueue *txq = &sc->sc_txq[0];
6294 	struct mbuf *m0 = txs->txs_mbuf;
6295 	struct m_tag *mtag;
6296 	uint32_t vl_len, mssidx, cmdc;
6297 	struct ether_header *eh;
6298 	int offset, iphl;
6299 
6300 	/*
6301 	 * XXX It would be nice if the mbuf pkthdr had offset
6302 	 * fields for the protocol headers.
6303 	 */
6304 	*cmdlenp = 0;
6305 	*fieldsp = 0;
6306 
6307 	eh = mtod(m0, struct ether_header *);
6308 	switch (htons(eh->ether_type)) {
6309 	case ETHERTYPE_IP:
6310 	case ETHERTYPE_IPV6:
6311 		offset = ETHER_HDR_LEN;
6312 		break;
6313 
6314 	case ETHERTYPE_VLAN:
6315 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6316 		break;
6317 
6318 	default:
6319 		/* Don't support this protocol or encapsulation. */
6320 		*do_csum = false;
6321 		return 0;
6322 	}
6323 	*do_csum = true;
6324 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6325 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6326 
6327 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6328 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6329 
6330 	if ((m0->m_pkthdr.csum_flags &
6331 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
6332 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6333 	} else {
6334 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6335 	}
6336 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6337 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6338 
6339 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6340 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6341 		     << NQTXC_VLLEN_VLAN_SHIFT);
6342 		*cmdlenp |= NQTX_CMD_VLE;
6343 	}
6344 
6345 	mssidx = 0;
6346 
6347 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6348 		int hlen = offset + iphl;
6349 		int tcp_hlen;
6350 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6351 
6352 		if (__predict_false(m0->m_len <
6353 				    (hlen + sizeof(struct tcphdr)))) {
6354 			/*
6355 			 * TCP/IP headers are not in the first mbuf; we need
6356 			 * to do this the slow and painful way.  Let's just
6357 			 * hope this doesn't happen very often.
6358 			 */
6359 			struct tcphdr th;
6360 
6361 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6362 
6363 			m_copydata(m0, hlen, sizeof(th), &th);
6364 			if (v4) {
6365 				struct ip ip;
6366 
6367 				m_copydata(m0, offset, sizeof(ip), &ip);
6368 				ip.ip_len = 0;
6369 				m_copyback(m0,
6370 				    offset + offsetof(struct ip, ip_len),
6371 				    sizeof(ip.ip_len), &ip.ip_len);
6372 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6373 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6374 			} else {
6375 				struct ip6_hdr ip6;
6376 
6377 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6378 				ip6.ip6_plen = 0;
6379 				m_copyback(m0,
6380 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6381 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6382 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6383 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6384 			}
6385 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6386 			    sizeof(th.th_sum), &th.th_sum);
6387 
6388 			tcp_hlen = th.th_off << 2;
6389 		} else {
6390 			/*
6391 			 * TCP/IP headers are in the first mbuf; we can do
6392 			 * this the easy way.
6393 			 */
6394 			struct tcphdr *th;
6395 
6396 			if (v4) {
6397 				struct ip *ip =
6398 				    (void *)(mtod(m0, char *) + offset);
6399 				th = (void *)(mtod(m0, char *) + hlen);
6400 
6401 				ip->ip_len = 0;
6402 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6403 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6404 			} else {
6405 				struct ip6_hdr *ip6 =
6406 				    (void *)(mtod(m0, char *) + offset);
6407 				th = (void *)(mtod(m0, char *) + hlen);
6408 
6409 				ip6->ip6_plen = 0;
6410 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6411 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6412 			}
6413 			tcp_hlen = th->th_off << 2;
6414 		}
6415 		hlen += tcp_hlen;
6416 		*cmdlenp |= NQTX_CMD_TSE;
6417 
6418 		if (v4) {
6419 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
6420 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6421 		} else {
6422 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6423 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6424 		}
6425 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6426 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6427 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6428 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6429 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6430 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6431 	} else {
6432 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6433 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6434 	}
6435 
6436 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6437 		*fieldsp |= NQTXD_FIELDS_IXSM;
6438 		cmdc |= NQTXC_CMD_IP4;
6439 	}
6440 
6441 	if (m0->m_pkthdr.csum_flags &
6442 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6443 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6444 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6445 			cmdc |= NQTXC_CMD_TCP;
6446 		} else {
6447 			cmdc |= NQTXC_CMD_UDP;
6448 		}
6449 		cmdc |= NQTXC_CMD_IP4;
6450 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6451 	}
6452 	if (m0->m_pkthdr.csum_flags &
6453 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6454 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6455 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6456 			cmdc |= NQTXC_CMD_TCP;
6457 		} else {
6458 			cmdc |= NQTXC_CMD_UDP;
6459 		}
6460 		cmdc |= NQTXC_CMD_IP6;
6461 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6462 	}
6463 
6464 	/* Fill in the context descriptor. */
6465 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6466 	    htole32(vl_len);
6467 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6468 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6469 	    htole32(cmdc);
6470 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6471 	    htole32(mssidx);
6472 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6473 	DPRINTF(WM_DEBUG_TX,
6474 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6475 	    txq->txq_next, 0, vl_len));
6476 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6477 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6478 	txs->txs_ndesc++;
6479 	return 0;
6480 }
6481 
6482 /*
6483  * wm_nq_start:		[ifnet interface function]
6484  *
6485  *	Start packet transmission on the interface for NEWQUEUE devices
6486  */
6487 static void
6488 wm_nq_start(struct ifnet *ifp)
6489 {
6490 	struct wm_softc *sc = ifp->if_softc;
6491 	struct wm_txqueue *txq = &sc->sc_txq[0];
6492 
6493 	WM_TX_LOCK(txq);
6494 	if (!sc->sc_stopping)
6495 		wm_nq_start_locked(ifp);
6496 	WM_TX_UNLOCK(txq);
6497 }
6498 
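/*
 * wm_nq_start_locked:
 *
 *	The body of wm_nq_start(); called with the Tx lock held.
 */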
6499 static void
6500 wm_nq_start_locked(struct ifnet *ifp)
6501 {
6502 	struct wm_softc *sc = ifp->if_softc;
6503 	struct wm_txqueue *txq = &sc->sc_txq[0];
6504 	struct mbuf *m0;
6505 	struct m_tag *mtag;
6506 	struct wm_txsoft *txs;
6507 	bus_dmamap_t dmamap;
6508 	int error, nexttx, lasttx = -1, seg, segs_needed;
6509 	bool do_csum, sent;
6510 
6511 	KASSERT(WM_TX_LOCKED(txq));
6512 
6513 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
6514 		return;
6515 
6516 	sent = false;
6517 
6518 	/*
6519 	 * Loop through the send queue, setting up transmit descriptors
6520 	 * until we drain the queue, or use up all available transmit
6521 	 * descriptors.
6522 	 */
6523 	for (;;) {
6524 		m0 = NULL;
6525 
6526 		/* Get a work queue entry. */
6527 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6528 			wm_txeof(sc);
6529 			if (txq->txq_sfree == 0) {
6530 				DPRINTF(WM_DEBUG_TX,
6531 				    ("%s: TX: no free job descriptors\n",
6532 					device_xname(sc->sc_dev)));
6533 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6534 				break;
6535 			}
6536 		}
6537 
6538 		/* Grab a packet off the queue. */
6539 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6540 		if (m0 == NULL)
6541 			break;
6542 
6543 		DPRINTF(WM_DEBUG_TX,
6544 		    ("%s: TX: have packet to transmit: %p\n",
6545 		    device_xname(sc->sc_dev), m0));
6546 
6547 		txs = &txq->txq_soft[txq->txq_snext];
6548 		dmamap = txs->txs_dmamap;
6549 
6550 		/*
6551 		 * Load the DMA map.  If this fails, the packet either
6552 		 * didn't fit in the allotted number of segments, or we
6553 		 * were short on resources.  For the too-many-segments
6554 		 * case, we simply report an error and drop the packet,
6555 		 * since we can't sanely copy a jumbo packet to a single
6556 		 * buffer.
6557 		 */
6558 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6559 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
6560 		if (error) {
6561 			if (error == EFBIG) {
6562 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6563 				log(LOG_ERR, "%s: Tx packet consumes too many "
6564 				    "DMA segments, dropping...\n",
6565 				    device_xname(sc->sc_dev));
6566 				wm_dump_mbuf_chain(sc, m0);
6567 				m_freem(m0);
6568 				continue;
6569 			}
6570 			/* Short on resources, just stop for now. */
6571 			DPRINTF(WM_DEBUG_TX,
6572 			    ("%s: TX: dmamap load failed: %d\n",
6573 			    device_xname(sc->sc_dev), error));
6574 			break;
6575 		}
6576 
6577 		segs_needed = dmamap->dm_nsegs;
6578 
6579 		/*
6580 		 * Ensure we have enough descriptors free to describe
6581 		 * the packet.  Note, we always reserve one descriptor
6582 		 * at the end of the ring due to the semantics of the
6583 		 * TDT register, plus one more in the event we need
6584 		 * to load offload context.
6585 		 */
6586 		if (segs_needed > txq->txq_free - 2) {
6587 			/*
6588 			 * Not enough free descriptors to transmit this
6589 			 * packet.  We haven't committed anything yet,
6590 			 * so just unload the DMA map, put the packet
6591 			 * back on the queue, and punt.  Notify the upper
6592 			 * layer that there are no more slots left.
6593 			 */
6594 			DPRINTF(WM_DEBUG_TX,
6595 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6596 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6597 			    segs_needed, txq->txq_free - 1));
6598 			ifp->if_flags |= IFF_OACTIVE;
6599 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6600 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6601 			break;
6602 		}
6603 
6604 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6605 
6606 		DPRINTF(WM_DEBUG_TX,
6607 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6608 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6609 
6610 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6611 
6612 		/*
6613 		 * Store a pointer to the packet so that we can free it
6614 		 * later.
6615 		 *
6616 		 * Initially, we consider the number of descriptors the
6617 		 * packet uses to be the number of DMA segments.  This may be
6618 		 * incremented by 1 if we do checksum offload (a descriptor
6619 		 * is used to set the checksum context).
6620 		 */
6621 		txs->txs_mbuf = m0;
6622 		txs->txs_firstdesc = txq->txq_next;
6623 		txs->txs_ndesc = segs_needed;
6624 
6625 		/* Set up offload parameters for this packet. */
6626 		uint32_t cmdlen, fields, dcmdlen;
6627 		if (m0->m_pkthdr.csum_flags &
6628 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
6629 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
6630 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
6631 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6632 			    &do_csum) != 0) {
6633 				/* Error message already displayed. */
6634 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6635 				continue;
6636 			}
6637 		} else {
6638 			do_csum = false;
6639 			cmdlen = 0;
6640 			fields = 0;
6641 		}
6642 
6643 		/* Sync the DMA map. */
6644 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6645 		    BUS_DMASYNC_PREWRITE);
6646 
6647 		/* Initialize the first transmit descriptor. */
6648 		nexttx = txq->txq_next;
6649 		if (!do_csum) {
6650 			/* setup a legacy descriptor */
6651 			wm_set_dma_addr(
6652 			    &txq->txq_descs[nexttx].wtx_addr,
6653 			    dmamap->dm_segs[0].ds_addr);
6654 			txq->txq_descs[nexttx].wtx_cmdlen =
6655 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6656 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6657 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6658 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6659 			    NULL) {
6660 				txq->txq_descs[nexttx].wtx_cmdlen |=
6661 				    htole32(WTX_CMD_VLE);
6662 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6663 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6664 			} else {
6665 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6666 			}
6667 			dcmdlen = 0;
6668 		} else {
6669 			/* setup an advanced data descriptor */
6670 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6671 			    htole64(dmamap->dm_segs[0].ds_addr);
6672 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6673 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6674 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6675 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6676 			    htole32(fields);
6677 			DPRINTF(WM_DEBUG_TX,
6678 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6679 			    device_xname(sc->sc_dev), nexttx,
6680 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
6681 			DPRINTF(WM_DEBUG_TX,
6682 			    ("\t 0x%08x%08x\n", fields,
6683 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6684 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6685 		}
6686 
6687 		lasttx = nexttx;
6688 		nexttx = WM_NEXTTX(txq, nexttx);
6689 		/*
6690 		 * Fill in the next descriptors.  The legacy and advanced
6691 		 * formats are the same from here on.
6692 		 */
6693 		for (seg = 1; seg < dmamap->dm_nsegs;
6694 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6695 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6696 			    htole64(dmamap->dm_segs[seg].ds_addr);
6697 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6698 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6699 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6700 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6701 			lasttx = nexttx;
6702 
6703 			DPRINTF(WM_DEBUG_TX,
6704 			    ("%s: TX: desc %d: %#" PRIx64 ", "
6705 			     "len %#04zx\n",
6706 			    device_xname(sc->sc_dev), nexttx,
6707 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
6708 			    dmamap->dm_segs[seg].ds_len));
6709 		}
6710 
6711 		KASSERT(lasttx != -1);
6712 
6713 		/*
6714 		 * Set up the command byte on the last descriptor of
6715 		 * the packet.  If we're in the interrupt delay window,
6716 		 * delay the interrupt.
6717 		 */
6718 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6719 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
6720 		txq->txq_descs[lasttx].wtx_cmdlen |=
6721 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6722 
6723 		txs->txs_lastdesc = lasttx;
6724 
6725 		DPRINTF(WM_DEBUG_TX,
6726 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6727 		    device_xname(sc->sc_dev),
6728 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6729 
6730 		/* Sync the descriptors we're using. */
6731 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6732 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
6733 
6734 		/* Give the packet to the chip. */
6735 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6736 		sent = true;
6737 
6738 		DPRINTF(WM_DEBUG_TX,
6739 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6740 
6741 		DPRINTF(WM_DEBUG_TX,
6742 		    ("%s: TX: finished transmitting packet, job %d\n",
6743 		    device_xname(sc->sc_dev), txq->txq_snext));
6744 
6745 		/* Advance the tx pointer. */
6746 		txq->txq_free -= txs->txs_ndesc;
6747 		txq->txq_next = nexttx;
6748 
6749 		txq->txq_sfree--;
6750 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6751 
6752 		/* Pass the packet to any BPF listeners. */
6753 		bpf_mtap(ifp, m0);
6754 	}
6755 
6756 	if (m0 != NULL) {
6757 		ifp->if_flags |= IFF_OACTIVE;
6758 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6759 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
6760 		m_freem(m0);
6761 	}
6762 
6763 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6764 		/* No more slots; notify upper layer. */
6765 		ifp->if_flags |= IFF_OACTIVE;
6766 	}
6767 
6768 	if (sent) {
6769 		/* Set a watchdog timer in case the chip flakes out. */
6770 		ifp->if_timer = 5;
6771 	}
6772 }
6773 
6774 /* Interrupt */
6775 
6776 /*
6777  * wm_txeof:
6778  *
6779  *	Helper; handle transmit interrupts.
6780  */
6781 static int
6782 wm_txeof(struct wm_softc *sc)
6783 {
6784 	struct wm_txqueue *txq = &sc->sc_txq[0];
6785 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6786 	struct wm_txsoft *txs;
6787 	bool processed = false;
6788 	int count = 0;
6789 	int i;
6790 	uint8_t status;
6791 
6792 	if (sc->sc_stopping)
6793 		return 0;
6794 
6795 	ifp->if_flags &= ~IFF_OACTIVE;
6796 
6797 	/*
6798 	 * Go through the Tx list and free mbufs for those
6799 	 * frames which have been transmitted.
6800 	 */
6801 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6802 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6803 		txs = &txq->txq_soft[i];
6804 
6805 		DPRINTF(WM_DEBUG_TX,
6806 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
6807 
6808 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6809 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6810 
6811 		status =
6812 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6813 		if ((status & WTX_ST_DD) == 0) {
6814 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6815 			    BUS_DMASYNC_PREREAD);
6816 			break;
6817 		}
6818 
6819 		processed = true;
6820 		count++;
6821 		DPRINTF(WM_DEBUG_TX,
6822 		    ("%s: TX: job %d done: descs %d..%d\n",
6823 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6824 		    txs->txs_lastdesc));
6825 
6826 		/*
6827 		 * XXX We should probably be using the statistics
6828 		 * XXX registers, but I don't know if they exist
6829 		 * XXX on chips before the i82544.
6830 		 */
6831 
6832 #ifdef WM_EVENT_COUNTERS
6833 		if (status & WTX_ST_TU)
6834 			WM_EVCNT_INCR(&sc->sc_ev_tu);
6835 #endif /* WM_EVENT_COUNTERS */
6836 
6837 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
6838 			ifp->if_oerrors++;
6839 			if (status & WTX_ST_LC)
6840 				log(LOG_WARNING, "%s: late collision\n",
6841 				    device_xname(sc->sc_dev));
6842 			else if (status & WTX_ST_EC) {
6843 				ifp->if_collisions += 16;
6844 				log(LOG_WARNING, "%s: excessive collisions\n",
6845 				    device_xname(sc->sc_dev));
6846 			}
6847 		} else
6848 			ifp->if_opackets++;
6849 
6850 		txq->txq_free += txs->txs_ndesc;
6851 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6852 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6853 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6854 		m_freem(txs->txs_mbuf);
6855 		txs->txs_mbuf = NULL;
6856 	}
6857 
6858 	/* Update the dirty transmit buffer pointer. */
6859 	txq->txq_sdirty = i;
6860 	DPRINTF(WM_DEBUG_TX,
6861 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6862 
6863 	if (count != 0)
6864 		rnd_add_uint32(&sc->rnd_source, count);
6865 
6866 	/*
6867 	 * If there are no more pending transmissions, cancel the watchdog
6868 	 * timer.
6869 	 */
6870 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6871 		ifp->if_timer = 0;
6872 
6873 	return processed;
6874 }
6875 
6876 /*
6877  * wm_rxeof:
6878  *
6879  *	Helper; handle receive interrupts.
6880  */
6881 static void
6882 wm_rxeof(struct wm_rxqueue *rxq)
6883 {
6884 	struct wm_softc *sc = rxq->rxq_sc;
6885 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6886 	struct wm_rxsoft *rxs;
6887 	struct mbuf *m;
6888 	int i, len;
6889 	int count = 0;
6890 	uint8_t status, errors;
6891 	uint16_t vlantag;
6892 
6893 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6894 		rxs = &rxq->rxq_soft[i];
6895 
6896 		DPRINTF(WM_DEBUG_RX,
6897 		    ("%s: RX: checking descriptor %d\n",
6898 		    device_xname(sc->sc_dev), i));
6899 
6900 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6901 
6902 		status = rxq->rxq_descs[i].wrx_status;
6903 		errors = rxq->rxq_descs[i].wrx_errors;
6904 		len = le16toh(rxq->rxq_descs[i].wrx_len);
6905 		vlantag = rxq->rxq_descs[i].wrx_special;
6906 
6907 		if ((status & WRX_ST_DD) == 0) {
6908 			/* We have processed all of the receive descriptors. */
6909 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6910 			break;
6911 		}
6912 
6913 		count++;
6914 		if (__predict_false(rxq->rxq_discard)) {
6915 			DPRINTF(WM_DEBUG_RX,
6916 			    ("%s: RX: discarding contents of descriptor %d\n",
6917 			    device_xname(sc->sc_dev), i));
6918 			wm_init_rxdesc(rxq, i);
6919 			if (status & WRX_ST_EOP) {
6920 				/* Reset our state. */
6921 				DPRINTF(WM_DEBUG_RX,
6922 				    ("%s: RX: resetting rxdiscard -> 0\n",
6923 				    device_xname(sc->sc_dev)));
6924 				rxq->rxq_discard = 0;
6925 			}
6926 			continue;
6927 		}
6928 
6929 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6930 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6931 
6932 		m = rxs->rxs_mbuf;
6933 
6934 		/*
6935 		 * Add a new receive buffer to the ring, unless of
6936 		 * course the length is zero. Treat the latter as a
6937 		 * failed mapping.
6938 		 */
6939 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6940 			/*
6941 			 * Failed, throw away what we've done so
6942 			 * far, and discard the rest of the packet.
6943 			 */
6944 			ifp->if_ierrors++;
6945 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6946 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6947 			wm_init_rxdesc(rxq, i);
6948 			if ((status & WRX_ST_EOP) == 0)
6949 				rxq->rxq_discard = 1;
6950 			if (rxq->rxq_head != NULL)
6951 				m_freem(rxq->rxq_head);
6952 			WM_RXCHAIN_RESET(rxq);
6953 			DPRINTF(WM_DEBUG_RX,
6954 			    ("%s: RX: Rx buffer allocation failed, "
6955 			    "dropping packet%s\n", device_xname(sc->sc_dev),
6956 			    rxq->rxq_discard ? " (discard)" : ""));
6957 			continue;
6958 		}
6959 
6960 		m->m_len = len;
6961 		rxq->rxq_len += len;
6962 		DPRINTF(WM_DEBUG_RX,
6963 		    ("%s: RX: buffer at %p len %d\n",
6964 		    device_xname(sc->sc_dev), m->m_data, len));
6965 
6966 		/* If this is not the end of the packet, keep looking. */
6967 		if ((status & WRX_ST_EOP) == 0) {
6968 			WM_RXCHAIN_LINK(rxq, m);
6969 			DPRINTF(WM_DEBUG_RX,
6970 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
6971 			    device_xname(sc->sc_dev), rxq->rxq_len));
6972 			continue;
6973 		}
6974 
		/*
		 * Okay, we have the entire packet now.  The chip
		 * includes the FCS in the received frame (not all chips
		 * can be configured to strip it), so we need to trim it,
		 * except on the I350, I354, I210 and I211.  On those
		 * chips the RCTL_SECRC bit in the RCTL register is always
		 * set due to an erratum, so the hardware strips the FCS
		 * for us.
		 * We may need to adjust the length of the previous mbuf
		 * in the chain if the current mbuf is too short.
		 */
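		/*
		 * For example, with a 4-byte FCS: if the final mbuf holds
		 * only 2 bytes, those 2 bytes are FCS, and the last 2
		 * bytes of the previous mbuf are FCS as well, so the
		 * previous mbuf is shortened by 2 and this one is emptied.
		 */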
6985 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
6986 		    && (sc->sc_type != WM_T_I210)
6987 		    && (sc->sc_type != WM_T_I211)) {
6988 			if (m->m_len < ETHER_CRC_LEN) {
6989 				rxq->rxq_tail->m_len
6990 				    -= (ETHER_CRC_LEN - m->m_len);
6991 				m->m_len = 0;
6992 			} else
6993 				m->m_len -= ETHER_CRC_LEN;
6994 			len = rxq->rxq_len - ETHER_CRC_LEN;
6995 		} else
6996 			len = rxq->rxq_len;
6997 
6998 		WM_RXCHAIN_LINK(rxq, m);
6999 
7000 		*rxq->rxq_tailp = NULL;
7001 		m = rxq->rxq_head;
7002 
7003 		WM_RXCHAIN_RESET(rxq);
7004 
7005 		DPRINTF(WM_DEBUG_RX,
7006 		    ("%s: RX: have entire packet, len -> %d\n",
7007 		    device_xname(sc->sc_dev), len));
7008 
7009 		/* If an error occurred, update stats and drop the packet. */
7010 		if (errors &
7011 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7012 			if (errors & WRX_ER_SE)
7013 				log(LOG_WARNING, "%s: symbol error\n",
7014 				    device_xname(sc->sc_dev));
7015 			else if (errors & WRX_ER_SEQ)
7016 				log(LOG_WARNING, "%s: receive sequence error\n",
7017 				    device_xname(sc->sc_dev));
7018 			else if (errors & WRX_ER_CE)
7019 				log(LOG_WARNING, "%s: CRC error\n",
7020 				    device_xname(sc->sc_dev));
7021 			m_freem(m);
7022 			continue;
7023 		}
7024 
7025 		/* No errors.  Receive the packet. */
7026 		m->m_pkthdr.rcvif = ifp;
7027 		m->m_pkthdr.len = len;
7028 
7029 		/*
7030 		 * If VLANs are enabled, VLAN packets have been unwrapped
7031 		 * for us.  Associate the tag with the packet.
7032 		 */
7033 		/* XXXX should check for i350 and i354 */
7034 		if ((status & WRX_ST_VP) != 0) {
7035 			VLAN_INPUT_TAG(ifp, m,
7036 			    le16toh(vlantag),
7037 			    continue);
7038 		}
7039 
7040 		/* Set up checksum info for this packet. */
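		/*
		 * WRX_ST_IXSM ("ignore checksum indication") is set when
		 * the hardware did not compute checksums for this frame,
		 * so the IPCS/TCPCS bits below are only meaningful when
		 * it is clear.
		 */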
7041 		if ((status & WRX_ST_IXSM) == 0) {
7042 			if (status & WRX_ST_IPCS) {
7043 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7044 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7045 				if (errors & WRX_ER_IPE)
7046 					m->m_pkthdr.csum_flags |=
7047 					    M_CSUM_IPv4_BAD;
7048 			}
7049 			if (status & WRX_ST_TCPCS) {
7050 				/*
7051 				 * Note: we don't know if this was TCP or UDP,
7052 				 * so we just set both bits, and expect the
7053 				 * upper layers to deal.
7054 				 */
7055 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7056 				m->m_pkthdr.csum_flags |=
7057 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7058 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7059 				if (errors & WRX_ER_TCPE)
7060 					m->m_pkthdr.csum_flags |=
7061 					    M_CSUM_TCP_UDP_BAD;
7062 			}
7063 		}
7064 
7065 		ifp->if_ipackets++;
7066 
7067 		WM_RX_UNLOCK(rxq);
7068 
7069 		/* Pass this up to any BPF listeners. */
7070 		bpf_mtap(ifp, m);
7071 
7072 		/* Pass it on. */
7073 		(*ifp->if_input)(ifp, m);
7074 
7075 		WM_RX_LOCK(rxq);
7076 
7077 		if (sc->sc_stopping)
7078 			break;
7079 	}
7080 
7081 	/* Update the receive pointer. */
7082 	rxq->rxq_ptr = i;
7083 	if (count != 0)
7084 		rnd_add_uint32(&sc->rnd_source, count);
7085 
7086 	DPRINTF(WM_DEBUG_RX,
7087 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7088 }
7089 
7090 /*
7091  * wm_linkintr_gmii:
7092  *
7093  *	Helper; handle link interrupts for GMII.
7094  */
7095 static void
7096 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7097 {
7098 
7099 	KASSERT(WM_CORE_LOCKED(sc));
7100 
7101 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7102 		__func__));
7103 
7104 	if (icr & ICR_LSC) {
7105 		DPRINTF(WM_DEBUG_LINK,
7106 		    ("%s: LINK: LSC -> mii_pollstat\n",
7107 			device_xname(sc->sc_dev)));
7108 		mii_pollstat(&sc->sc_mii);
7109 		if (sc->sc_type == WM_T_82543) {
7110 			int miistatus, active;
7111 
7112 			/*
			 * With the 82543, we need to force the MAC's
			 * speed and duplex to match the PHY's
			 * configuration.
7116 			 */
7117 			miistatus = sc->sc_mii.mii_media_status;
7118 
7119 			if (miistatus & IFM_ACTIVE) {
7120 				active = sc->sc_mii.mii_media_active;
7121 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7122 				switch (IFM_SUBTYPE(active)) {
7123 				case IFM_10_T:
7124 					sc->sc_ctrl |= CTRL_SPEED_10;
7125 					break;
7126 				case IFM_100_TX:
7127 					sc->sc_ctrl |= CTRL_SPEED_100;
7128 					break;
7129 				case IFM_1000_T:
7130 					sc->sc_ctrl |= CTRL_SPEED_1000;
7131 					break;
7132 				default:
7133 					/*
					 * Fiber?
					 * Should not get here.
7136 					 */
7137 					printf("unknown media (%x)\n",
7138 					    active);
7139 					break;
7140 				}
7141 				if (active & IFM_FDX)
7142 					sc->sc_ctrl |= CTRL_FD;
7143 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7144 			}
7145 		} else if ((sc->sc_type == WM_T_ICH8)
7146 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7147 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7148 		} else if (sc->sc_type == WM_T_PCH) {
7149 			wm_k1_gig_workaround_hv(sc,
7150 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7151 		}
7152 
7153 		if ((sc->sc_phytype == WMPHY_82578)
7154 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7155 			== IFM_1000_T)) {
7156 
7157 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7158 				delay(200*1000); /* XXX too big */
7159 
7160 				/* Link stall fix for link up */
7161 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7162 				    HV_MUX_DATA_CTRL,
7163 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7164 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7165 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7166 				    HV_MUX_DATA_CTRL,
7167 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7168 			}
7169 		}
7170 	} else if (icr & ICR_RXSEQ) {
7171 		DPRINTF(WM_DEBUG_LINK,
7172 		    ("%s: LINK Receive sequence error\n",
7173 			device_xname(sc->sc_dev)));
7174 	}
7175 }
7176 
7177 /*
7178  * wm_linkintr_tbi:
7179  *
7180  *	Helper; handle link interrupts for TBI mode.
7181  */
7182 static void
7183 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7184 {
7185 	uint32_t status;
7186 
7187 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7188 		__func__));
7189 
7190 	status = CSR_READ(sc, WMREG_STATUS);
7191 	if (icr & ICR_LSC) {
7192 		if (status & STATUS_LU) {
7193 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7194 			    device_xname(sc->sc_dev),
7195 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7196 			/*
7197 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7198 			 * so we should update sc->sc_ctrl
7199 			 */
7200 
7201 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7202 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7203 			sc->sc_fcrtl &= ~FCRTL_XONE;
7204 			if (status & STATUS_FD)
7205 				sc->sc_tctl |=
7206 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7207 			else
7208 				sc->sc_tctl |=
7209 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7210 			if (sc->sc_ctrl & CTRL_TFCE)
7211 				sc->sc_fcrtl |= FCRTL_XONE;
7212 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7213 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7214 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7215 				      sc->sc_fcrtl);
7216 			sc->sc_tbi_linkup = 1;
7217 		} else {
7218 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7219 			    device_xname(sc->sc_dev)));
7220 			sc->sc_tbi_linkup = 0;
7221 		}
7222 		/* Update LED */
7223 		wm_tbi_serdes_set_linkled(sc);
7224 	} else if (icr & ICR_RXSEQ) {
7225 		DPRINTF(WM_DEBUG_LINK,
7226 		    ("%s: LINK: Receive sequence error\n",
7227 		    device_xname(sc->sc_dev)));
7228 	}
7229 }
7230 
7231 /*
7232  * wm_linkintr_serdes:
7233  *
 *	Helper; handle link interrupts for SERDES mode.
7235  */
7236 static void
7237 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7238 {
7239 	struct mii_data *mii = &sc->sc_mii;
7240 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7241 	uint32_t pcs_adv, pcs_lpab, reg;
7242 
7243 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7244 		__func__));
7245 
7246 	if (icr & ICR_LSC) {
7247 		/* Check PCS */
7248 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7249 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7250 			mii->mii_media_status |= IFM_ACTIVE;
7251 			sc->sc_tbi_linkup = 1;
7252 		} else {
7253 			mii->mii_media_status |= IFM_NONE;
7254 			sc->sc_tbi_linkup = 0;
7255 			wm_tbi_serdes_set_linkled(sc);
7256 			return;
7257 		}
7258 		mii->mii_media_active |= IFM_1000_SX;
7259 		if ((reg & PCS_LSTS_FDX) != 0)
7260 			mii->mii_media_active |= IFM_FDX;
7261 		else
7262 			mii->mii_media_active |= IFM_HDX;
7263 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7264 			/* Check flow */
7265 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7266 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7267 				DPRINTF(WM_DEBUG_LINK,
7268 				    ("XXX LINKOK but not ACOMP\n"));
7269 				return;
7270 			}
7271 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7272 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7273 			DPRINTF(WM_DEBUG_LINK,
7274 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
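			/*
			 * Resolve flow control as in IEEE 802.3 Annex 28B:
			 * both sides advertising symmetric pause enables
			 * pause in both directions; otherwise a matching
			 * symmetric/asymmetric combination enables pause
			 * in one direction only.
			 */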
7275 			if ((pcs_adv & TXCW_SYM_PAUSE)
7276 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7277 				mii->mii_media_active |= IFM_FLOW
7278 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7279 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7280 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7281 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7282 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7283 				mii->mii_media_active |= IFM_FLOW
7284 				    | IFM_ETH_TXPAUSE;
7285 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7286 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7287 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7288 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7289 				mii->mii_media_active |= IFM_FLOW
7290 				    | IFM_ETH_RXPAUSE;
7291 		}
7292 		/* Update LED */
7293 		wm_tbi_serdes_set_linkled(sc);
7294 	} else {
7295 		DPRINTF(WM_DEBUG_LINK,
7296 		    ("%s: LINK: Receive sequence error\n",
7297 		    device_xname(sc->sc_dev)));
7298 	}
7299 }
7300 
7301 /*
7302  * wm_linkintr:
7303  *
7304  *	Helper; handle link interrupts.
7305  */
7306 static void
7307 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7308 {
7309 
7310 	KASSERT(WM_CORE_LOCKED(sc));
7311 
7312 	if (sc->sc_flags & WM_F_HAS_MII)
7313 		wm_linkintr_gmii(sc, icr);
7314 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7315 	    && (sc->sc_type >= WM_T_82575))
7316 		wm_linkintr_serdes(sc, icr);
7317 	else
7318 		wm_linkintr_tbi(sc, icr);
7319 }
7320 
7321 /*
7322  * wm_intr_legacy:
7323  *
7324  *	Interrupt service routine for INTx and MSI.
7325  */
7326 static int
7327 wm_intr_legacy(void *arg)
7328 {
7329 	struct wm_softc *sc = arg;
7330 	struct wm_txqueue *txq = &sc->sc_txq[0];
7331 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7332 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7333 	uint32_t icr, rndval = 0;
7334 	int handled = 0;
7335 
7336 	DPRINTF(WM_DEBUG_TX,
7337 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7338 	while (1 /* CONSTCOND */) {
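		/*
		 * Reading ICR acknowledges and clears the asserted
		 * interrupt causes on these chips, so loop until no
		 * cause bits we registered for remain set.
		 */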
7339 		icr = CSR_READ(sc, WMREG_ICR);
7340 		if ((icr & sc->sc_icr) == 0)
7341 			break;
7342 		if (rndval == 0)
7343 			rndval = icr;
7344 
7345 		WM_RX_LOCK(rxq);
7346 
7347 		if (sc->sc_stopping) {
7348 			WM_RX_UNLOCK(rxq);
7349 			break;
7350 		}
7351 
7352 		handled = 1;
7353 
7354 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7355 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
7356 			DPRINTF(WM_DEBUG_RX,
7357 			    ("%s: RX: got Rx intr 0x%08x\n",
7358 			    device_xname(sc->sc_dev),
7359 			    icr & (ICR_RXDMT0|ICR_RXT0)));
7360 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7361 		}
7362 #endif
7363 		wm_rxeof(rxq);
7364 
7365 		WM_RX_UNLOCK(rxq);
7366 		WM_TX_LOCK(txq);
7367 
7368 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7369 		if (icr & ICR_TXDW) {
7370 			DPRINTF(WM_DEBUG_TX,
7371 			    ("%s: TX: got TXDW interrupt\n",
7372 			    device_xname(sc->sc_dev)));
7373 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
7374 		}
7375 #endif
7376 		wm_txeof(sc);
7377 
7378 		WM_TX_UNLOCK(txq);
7379 		WM_CORE_LOCK(sc);
7380 
7381 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
7382 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7383 			wm_linkintr(sc, icr);
7384 		}
7385 
7386 		WM_CORE_UNLOCK(sc);
7387 
7388 		if (icr & ICR_RXO) {
7389 #if defined(WM_DEBUG)
7390 			log(LOG_WARNING, "%s: Receive overrun\n",
7391 			    device_xname(sc->sc_dev));
7392 #endif /* defined(WM_DEBUG) */
7393 		}
7394 	}
7395 
7396 	rnd_add_uint32(&sc->rnd_source, rndval);
7397 
7398 	if (handled) {
7399 		/* Try to get more packets going. */
7400 		ifp->if_start(ifp);
7401 	}
7402 
7403 	return handled;
7404 }
7405 
7406 /*
7407  * wm_txintr_msix:
7408  *
7409  *	Interrupt service routine for TX complete interrupt for MSI-X.
7410  */
7411 static int
7412 wm_txintr_msix(void *arg)
7413 {
7414 	struct wm_txqueue *txq = arg;
7415 	struct wm_softc *sc = txq->txq_sc;
7416 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7417 	int handled = 0;
7418 
7419 	DPRINTF(WM_DEBUG_TX,
7420 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7421 
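	/*
	 * Mask this queue's interrupt while servicing it: the 82574 uses
	 * IMC/IMS with per-queue ICR bits, the 82575 uses EIMC/EIMS queue
	 * bits, and newer chips mask by MSI-X vector index.  The mask is
	 * lifted again below once wm_txeof() has run.
	 */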
7422 	if (sc->sc_type == WM_T_82574)
7423 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
7424 	else if (sc->sc_type == WM_T_82575)
7425 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7426 	else
7427 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7428 
7429 	WM_TX_LOCK(txq);
7430 
7431 	if (sc->sc_stopping)
7432 		goto out;
7433 
7434 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
7435 	handled = wm_txeof(sc);
7436 
7437 out:
7438 	WM_TX_UNLOCK(txq);
7439 
7440 	if (sc->sc_type == WM_T_82574)
7441 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
7442 	else if (sc->sc_type == WM_T_82575)
7443 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7444 	else
7445 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7446 
7447 	if (handled) {
7448 		/* Try to get more packets going. */
7449 		ifp->if_start(ifp);
7450 	}
7451 
7452 	return handled;
7453 }
7454 
7455 /*
7456  * wm_rxintr_msix:
7457  *
7458  *	Interrupt service routine for RX interrupt for MSI-X.
7459  */
7460 static int
7461 wm_rxintr_msix(void *arg)
7462 {
7463 	struct wm_rxqueue *rxq = arg;
7464 	struct wm_softc *sc = rxq->rxq_sc;
7465 
7466 	DPRINTF(WM_DEBUG_RX,
7467 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7468 
7469 	if (sc->sc_type == WM_T_82574)
7470 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
7471 	else if (sc->sc_type == WM_T_82575)
7472 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7473 	else
7474 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
7475 
7476 	WM_RX_LOCK(rxq);
7477 
7478 	if (sc->sc_stopping)
7479 		goto out;
7480 
7481 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7482 	wm_rxeof(rxq);
7483 
7484 out:
7485 	WM_RX_UNLOCK(rxq);
7486 
7487 	if (sc->sc_type == WM_T_82574)
7488 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7489 	else if (sc->sc_type == WM_T_82575)
7490 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7491 	else
7492 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7493 
7494 	return 1;
7495 }
7496 
7497 /*
7498  * wm_linkintr_msix:
7499  *
7500  *	Interrupt service routine for link status change for MSI-X.
7501  */
7502 static int
7503 wm_linkintr_msix(void *arg)
7504 {
7505 	struct wm_softc *sc = arg;
7506 	uint32_t reg;
7507 
7508 	DPRINTF(WM_DEBUG_LINK,
7509 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7510 
7511 	reg = CSR_READ(sc, WMREG_ICR);
7512 	WM_CORE_LOCK(sc);
7513 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7514 		goto out;
7515 
7516 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7517 	wm_linkintr(sc, ICR_LSC);
7518 
7519 out:
7520 	WM_CORE_UNLOCK(sc);
7521 
7522 	if (sc->sc_type == WM_T_82574)
7523 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
7524 	else if (sc->sc_type == WM_T_82575)
7525 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7526 	else
7527 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7528 
7529 	return 1;
7530 }
7531 
7532 /*
7533  * Media related.
7534  * GMII, SGMII, TBI (and SERDES)
7535  */
7536 
7537 /* Common */
7538 
7539 /*
7540  * wm_tbi_serdes_set_linkled:
7541  *
7542  *	Update the link LED on TBI and SERDES devices.
7543  */
7544 static void
7545 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7546 {
7547 
7548 	if (sc->sc_tbi_linkup)
7549 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7550 	else
7551 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7552 
7553 	/* 82540 or newer devices are active low */
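	/* XORing CTRL_SWDPIN(0) inverts the pin sense on those parts. */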
7554 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7555 
7556 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7557 }
7558 
7559 /* GMII related */
7560 
7561 /*
7562  * wm_gmii_reset:
7563  *
7564  *	Reset the PHY.
7565  */
7566 static void
7567 wm_gmii_reset(struct wm_softc *sc)
7568 {
7569 	uint32_t reg;
7570 	int rv;
7571 
7572 	/* get phy semaphore */
7573 	switch (sc->sc_type) {
7574 	case WM_T_82571:
7575 	case WM_T_82572:
7576 	case WM_T_82573:
7577 	case WM_T_82574:
7578 	case WM_T_82583:
7579 		 /* XXX should get sw semaphore, too */
7580 		rv = wm_get_swsm_semaphore(sc);
7581 		break;
7582 	case WM_T_82575:
7583 	case WM_T_82576:
7584 	case WM_T_82580:
7585 	case WM_T_I350:
7586 	case WM_T_I354:
7587 	case WM_T_I210:
7588 	case WM_T_I211:
7589 	case WM_T_80003:
7590 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7591 		break;
7592 	case WM_T_ICH8:
7593 	case WM_T_ICH9:
7594 	case WM_T_ICH10:
7595 	case WM_T_PCH:
7596 	case WM_T_PCH2:
7597 	case WM_T_PCH_LPT:
7598 		rv = wm_get_swfwhw_semaphore(sc);
7599 		break;
7600 	default:
		/* Nothing to do */
7602 		rv = 0;
7603 		break;
7604 	}
7605 	if (rv != 0) {
7606 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7607 		    __func__);
7608 		return;
7609 	}
7610 
7611 	switch (sc->sc_type) {
7612 	case WM_T_82542_2_0:
7613 	case WM_T_82542_2_1:
7614 		/* null */
7615 		break;
7616 	case WM_T_82543:
7617 		/*
		 * With the 82543, we need to force the MAC's speed and duplex
		 * to match the PHY's configuration.
7620 		 * In addition, we need to perform a hardware reset on the PHY
7621 		 * to take it out of reset.
7622 		 */
7623 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7624 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7625 
7626 		/* The PHY reset pin is active-low. */
7627 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7628 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7629 		    CTRL_EXT_SWDPIN(4));
7630 		reg |= CTRL_EXT_SWDPIO(4);
7631 
7632 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7633 		CSR_WRITE_FLUSH(sc);
7634 		delay(10*1000);
7635 
7636 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7637 		CSR_WRITE_FLUSH(sc);
7638 		delay(150);
7639 #if 0
7640 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7641 #endif
7642 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
7643 		break;
7644 	case WM_T_82544:	/* reset 10000us */
7645 	case WM_T_82540:
7646 	case WM_T_82545:
7647 	case WM_T_82545_3:
7648 	case WM_T_82546:
7649 	case WM_T_82546_3:
7650 	case WM_T_82541:
7651 	case WM_T_82541_2:
7652 	case WM_T_82547:
7653 	case WM_T_82547_2:
7654 	case WM_T_82571:	/* reset 100us */
7655 	case WM_T_82572:
7656 	case WM_T_82573:
7657 	case WM_T_82574:
7658 	case WM_T_82575:
7659 	case WM_T_82576:
7660 	case WM_T_82580:
7661 	case WM_T_I350:
7662 	case WM_T_I354:
7663 	case WM_T_I210:
7664 	case WM_T_I211:
7665 	case WM_T_82583:
7666 	case WM_T_80003:
7667 		/* generic reset */
7668 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7669 		CSR_WRITE_FLUSH(sc);
7670 		delay(20000);
7671 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7672 		CSR_WRITE_FLUSH(sc);
7673 		delay(20000);
7674 
7675 		if ((sc->sc_type == WM_T_82541)
7676 		    || (sc->sc_type == WM_T_82541_2)
7677 		    || (sc->sc_type == WM_T_82547)
7678 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
7680 			/* XXX add code to set LED after phy reset */
7681 		}
7682 		break;
7683 	case WM_T_ICH8:
7684 	case WM_T_ICH9:
7685 	case WM_T_ICH10:
7686 	case WM_T_PCH:
7687 	case WM_T_PCH2:
7688 	case WM_T_PCH_LPT:
7689 		/* generic reset */
7690 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7691 		CSR_WRITE_FLUSH(sc);
7692 		delay(100);
7693 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7694 		CSR_WRITE_FLUSH(sc);
7695 		delay(150);
7696 		break;
7697 	default:
7698 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7699 		    __func__);
7700 		break;
7701 	}
7702 
7703 	/* release PHY semaphore */
7704 	switch (sc->sc_type) {
7705 	case WM_T_82571:
7706 	case WM_T_82572:
7707 	case WM_T_82573:
7708 	case WM_T_82574:
7709 	case WM_T_82583:
7710 		 /* XXX should put sw semaphore, too */
7711 		wm_put_swsm_semaphore(sc);
7712 		break;
7713 	case WM_T_82575:
7714 	case WM_T_82576:
7715 	case WM_T_82580:
7716 	case WM_T_I350:
7717 	case WM_T_I354:
7718 	case WM_T_I210:
7719 	case WM_T_I211:
7720 	case WM_T_80003:
7721 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7722 		break;
7723 	case WM_T_ICH8:
7724 	case WM_T_ICH9:
7725 	case WM_T_ICH10:
7726 	case WM_T_PCH:
7727 	case WM_T_PCH2:
7728 	case WM_T_PCH_LPT:
7729 		wm_put_swfwhw_semaphore(sc);
7730 		break;
	default:
		/* Nothing to do */
7734 		break;
7735 	}
7736 
7737 	/* get_cfg_done */
7738 	wm_get_cfg_done(sc);
7739 
7740 	/* extra setup */
7741 	switch (sc->sc_type) {
7742 	case WM_T_82542_2_0:
7743 	case WM_T_82542_2_1:
7744 	case WM_T_82543:
7745 	case WM_T_82544:
7746 	case WM_T_82540:
7747 	case WM_T_82545:
7748 	case WM_T_82545_3:
7749 	case WM_T_82546:
7750 	case WM_T_82546_3:
7751 	case WM_T_82541_2:
7752 	case WM_T_82547_2:
7753 	case WM_T_82571:
7754 	case WM_T_82572:
7755 	case WM_T_82573:
7756 	case WM_T_82574:
7757 	case WM_T_82575:
7758 	case WM_T_82576:
7759 	case WM_T_82580:
7760 	case WM_T_I350:
7761 	case WM_T_I354:
7762 	case WM_T_I210:
7763 	case WM_T_I211:
7764 	case WM_T_82583:
7765 	case WM_T_80003:
7766 		/* null */
7767 		break;
7768 	case WM_T_82541:
7769 	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
7771 		break;
7772 	case WM_T_ICH8:
7773 	case WM_T_ICH9:
7774 	case WM_T_ICH10:
7775 	case WM_T_PCH:
7776 	case WM_T_PCH2:
7777 	case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
7779 		delay(10*1000);
7780 
7781 		if (sc->sc_type == WM_T_PCH)
7782 			wm_hv_phy_workaround_ich8lan(sc);
7783 
7784 		if (sc->sc_type == WM_T_PCH2)
7785 			wm_lv_phy_workaround_ich8lan(sc);
7786 
7787 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7788 			/*
7789 			 * dummy read to clear the phy wakeup bit after lcd
7790 			 * reset
7791 			 */
7792 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7793 		}
7794 
7795 		/*
		 * XXX Configure the LCD with the extended configuration region
		 * in NVM
7798 		 */
7799 
7800 		/* Configure the LCD with the OEM bits in NVM */
7801 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
7802 		    || (sc->sc_type == WM_T_PCH_LPT)) {
7803 			/*
7804 			 * Disable LPLU.
7805 			 * XXX It seems that 82567 has LPLU, too.
7806 			 */
7807 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
7809 			reg |= HV_OEM_BITS_ANEGNOW;
7810 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
7811 		}
7812 		break;
7813 	default:
7814 		panic("%s: unknown type\n", __func__);
7815 		break;
7816 	}
7817 }
7818 
7819 /*
7820  * wm_get_phy_id_82575:
7821  *
7822  * Return PHY ID. Return -1 if it failed.
7823  */
7824 static int
7825 wm_get_phy_id_82575(struct wm_softc *sc)
7826 {
7827 	uint32_t reg;
7828 	int phyid = -1;
7829 
7830 	/* XXX */
7831 	if ((sc->sc_flags & WM_F_SGMII) == 0)
7832 		return -1;
7833 
7834 	if (wm_sgmii_uses_mdio(sc)) {
7835 		switch (sc->sc_type) {
7836 		case WM_T_82575:
7837 		case WM_T_82576:
7838 			reg = CSR_READ(sc, WMREG_MDIC);
7839 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7840 			break;
7841 		case WM_T_82580:
7842 		case WM_T_I350:
7843 		case WM_T_I354:
7844 		case WM_T_I210:
7845 		case WM_T_I211:
7846 			reg = CSR_READ(sc, WMREG_MDICNFG);
7847 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7848 			break;
7849 		default:
7850 			return -1;
7851 		}
7852 	}
7853 
7854 	return phyid;
7855 }
7856 
7857 
7858 /*
7859  * wm_gmii_mediainit:
7860  *
7861  *	Initialize media for use on 1000BASE-T devices.
7862  */
7863 static void
7864 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7865 {
7866 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7867 	struct mii_data *mii = &sc->sc_mii;
7868 	uint32_t reg;
7869 
7870 	/* We have GMII. */
7871 	sc->sc_flags |= WM_F_HAS_MII;
7872 
7873 	if (sc->sc_type == WM_T_80003)
7874 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7875 	else
7876 		sc->sc_tipg = TIPG_1000T_DFLT;
7877 
7878 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7879 	if ((sc->sc_type == WM_T_82580)
7880 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7881 	    || (sc->sc_type == WM_T_I211)) {
7882 		reg = CSR_READ(sc, WMREG_PHPM);
7883 		reg &= ~PHPM_GO_LINK_D;
7884 		CSR_WRITE(sc, WMREG_PHPM, reg);
7885 	}
7886 
7887 	/*
7888 	 * Let the chip set speed/duplex on its own based on
7889 	 * signals from the PHY.
7890 	 * XXXbouyer - I'm not sure this is right for the 80003,
7891 	 * the em driver only sets CTRL_SLU here - but it seems to work.
7892 	 */
7893 	sc->sc_ctrl |= CTRL_SLU;
7894 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7895 
7896 	/* Initialize our media structures and probe the GMII. */
7897 	mii->mii_ifp = ifp;
7898 
7899 	/*
7900 	 * Determine the PHY access method.
7901 	 *
7902 	 *  For SGMII, use SGMII specific method.
7903 	 *
7904 	 *  For some devices, we can determine the PHY access method
7905 	 * from sc_type.
7906 	 *
	 *  For ICH and PCH variants, it's difficult to determine the PHY
	 * access method from sc_type alone, so use the PCI product ID for
	 * some devices.
	 * For other ICH8 variants, try igp's method first.  If the PHY
	 * can't be detected, fall back to bm's method.
7912 	 */
7913 	switch (prodid) {
7914 	case PCI_PRODUCT_INTEL_PCH_M_LM:
7915 	case PCI_PRODUCT_INTEL_PCH_M_LC:
7916 		/* 82577 */
7917 		sc->sc_phytype = WMPHY_82577;
7918 		break;
7919 	case PCI_PRODUCT_INTEL_PCH_D_DM:
7920 	case PCI_PRODUCT_INTEL_PCH_D_DC:
7921 		/* 82578 */
7922 		sc->sc_phytype = WMPHY_82578;
7923 		break;
7924 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7925 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
7926 		/* 82579 */
7927 		sc->sc_phytype = WMPHY_82579;
7928 		break;
7929 	case PCI_PRODUCT_INTEL_82801I_BM:
7930 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7931 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7932 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7933 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7934 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7935 		/* 82567 */
7936 		sc->sc_phytype = WMPHY_BM;
7937 		mii->mii_readreg = wm_gmii_bm_readreg;
7938 		mii->mii_writereg = wm_gmii_bm_writereg;
7939 		break;
7940 	default:
7941 		if (((sc->sc_flags & WM_F_SGMII) != 0)
7942 		    && !wm_sgmii_uses_mdio(sc)){
7943 			/* SGMII */
7944 			mii->mii_readreg = wm_sgmii_readreg;
7945 			mii->mii_writereg = wm_sgmii_writereg;
7946 		} else if (sc->sc_type >= WM_T_80003) {
7947 			/* 80003 */
7948 			mii->mii_readreg = wm_gmii_i80003_readreg;
7949 			mii->mii_writereg = wm_gmii_i80003_writereg;
7950 		} else if (sc->sc_type >= WM_T_I210) {
7951 			/* I210 and I211 */
7952 			mii->mii_readreg = wm_gmii_gs40g_readreg;
7953 			mii->mii_writereg = wm_gmii_gs40g_writereg;
7954 		} else if (sc->sc_type >= WM_T_82580) {
7955 			/* 82580, I350 and I354 */
7956 			sc->sc_phytype = WMPHY_82580;
7957 			mii->mii_readreg = wm_gmii_82580_readreg;
7958 			mii->mii_writereg = wm_gmii_82580_writereg;
7959 		} else if (sc->sc_type >= WM_T_82544) {
			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
7961 			mii->mii_readreg = wm_gmii_i82544_readreg;
7962 			mii->mii_writereg = wm_gmii_i82544_writereg;
7963 		} else {
7964 			mii->mii_readreg = wm_gmii_i82543_readreg;
7965 			mii->mii_writereg = wm_gmii_i82543_writereg;
7966 		}
7967 		break;
7968 	}
7969 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
7970 		/* All PCH* use _hv_ */
7971 		mii->mii_readreg = wm_gmii_hv_readreg;
7972 		mii->mii_writereg = wm_gmii_hv_writereg;
7973 	}
7974 	mii->mii_statchg = wm_gmii_statchg;
7975 
7976 	wm_gmii_reset(sc);
7977 
7978 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
7979 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
7980 	    wm_gmii_mediastatus);
7981 
7982 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
7983 	    || (sc->sc_type == WM_T_82580)
7984 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
7985 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
7986 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
7987 			/* Attach only one port */
7988 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
7989 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
7990 		} else {
7991 			int i, id;
7992 			uint32_t ctrl_ext;
7993 
7994 			id = wm_get_phy_id_82575(sc);
7995 			if (id != -1) {
7996 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
7997 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
7998 			}
7999 			if ((id == -1)
8000 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8001 				/* Power on sgmii phy if it is disabled */
8002 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8003 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8004 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8005 				CSR_WRITE_FLUSH(sc);
8006 				delay(300*1000); /* XXX too long */
8007 
8008 				/* from 1 to 8 */
				/* Try PHY addresses 1 through 7 */
8010 					mii_attach(sc->sc_dev, &sc->sc_mii,
8011 					    0xffffffff, i, MII_OFFSET_ANY,
8012 					    MIIF_DOPAUSE);
8013 
8014 				/* restore previous sfp cage power state */
8015 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8016 			}
8017 		}
8018 	} else {
8019 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8020 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8021 	}
8022 
8023 	/*
8024 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8025 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8026 	 */
8027 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8028 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8029 		wm_set_mdio_slow_mode_hv(sc);
8030 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8031 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8032 	}
8033 
8034 	/*
8035 	 * (For ICH8 variants)
8036 	 * If PHY detection failed, use BM's r/w function and retry.
8037 	 */
8038 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8039 		/* if failed, retry with *_bm_* */
8040 		mii->mii_readreg = wm_gmii_bm_readreg;
8041 		mii->mii_writereg = wm_gmii_bm_writereg;
8042 
8043 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8044 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8045 	}
8046 
8047 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8049 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
8050 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
8051 		sc->sc_phytype = WMPHY_NONE;
8052 	} else {
8053 		/*
8054 		 * PHY Found!
8055 		 * Check PHY type.
8056 		 */
8057 		uint32_t model;
8058 		struct mii_softc *child;
8059 
8060 		child = LIST_FIRST(&mii->mii_phys);
8061 		if (device_is_a(child->mii_dev, "igphy")) {
8062 			struct igphy_softc *isc = (struct igphy_softc *)child;
8063 
8064 			model = isc->sc_mii.mii_mpd_model;
8065 			if (model == MII_MODEL_yyINTEL_I82566)
8066 				sc->sc_phytype = WMPHY_IGP_3;
8067 		}
8068 
8069 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8070 	}
8071 }
8072 
8073 /*
8074  * wm_gmii_mediachange:	[ifmedia interface function]
8075  *
8076  *	Set hardware to newly-selected media on a 1000BASE-T device.
8077  */
8078 static int
8079 wm_gmii_mediachange(struct ifnet *ifp)
8080 {
8081 	struct wm_softc *sc = ifp->if_softc;
8082 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8083 	int rc;
8084 
8085 	if ((ifp->if_flags & IFF_UP) == 0)
8086 		return 0;
8087 
8088 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8089 	sc->sc_ctrl |= CTRL_SLU;
8090 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8091 	    || (sc->sc_type > WM_T_82543)) {
8092 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8093 	} else {
8094 		sc->sc_ctrl &= ~CTRL_ASDE;
8095 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8096 		if (ife->ifm_media & IFM_FDX)
8097 			sc->sc_ctrl |= CTRL_FD;
8098 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8099 		case IFM_10_T:
8100 			sc->sc_ctrl |= CTRL_SPEED_10;
8101 			break;
8102 		case IFM_100_TX:
8103 			sc->sc_ctrl |= CTRL_SPEED_100;
8104 			break;
8105 		case IFM_1000_T:
8106 			sc->sc_ctrl |= CTRL_SPEED_1000;
8107 			break;
8108 		default:
8109 			panic("wm_gmii_mediachange: bad media 0x%x",
8110 			    ife->ifm_media);
8111 		}
8112 	}
8113 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8114 	if (sc->sc_type <= WM_T_82543)
8115 		wm_gmii_reset(sc);
8116 
8117 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8118 		return 0;
8119 	return rc;
8120 }
8121 
8122 /*
8123  * wm_gmii_mediastatus:	[ifmedia interface function]
8124  *
8125  *	Get the current interface media status on a 1000BASE-T device.
8126  */
8127 static void
8128 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8129 {
8130 	struct wm_softc *sc = ifp->if_softc;
8131 
8132 	ether_mediastatus(ifp, ifmr);
8133 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8134 	    | sc->sc_flowflags;
8135 }
8136 
8137 #define	MDI_IO		CTRL_SWDPIN(2)
8138 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8139 #define	MDI_CLK		CTRL_SWDPIN(3)
8140 
8141 static void
8142 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8143 {
8144 	uint32_t i, v;
8145 
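	/*
	 * Bit-bang one field of an MDIO frame on the 82543's software-
	 * controlled pins: present each bit (MSB first) on MDI_IO, then
	 * pulse MDI_CLK high and low to clock it into the PHY.
	 */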
8146 	v = CSR_READ(sc, WMREG_CTRL);
8147 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8148 	v |= MDI_DIR | CTRL_SWDPIO(3);
8149 
8150 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8151 		if (data & i)
8152 			v |= MDI_IO;
8153 		else
8154 			v &= ~MDI_IO;
8155 		CSR_WRITE(sc, WMREG_CTRL, v);
8156 		CSR_WRITE_FLUSH(sc);
8157 		delay(10);
8158 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8159 		CSR_WRITE_FLUSH(sc);
8160 		delay(10);
8161 		CSR_WRITE(sc, WMREG_CTRL, v);
8162 		CSR_WRITE_FLUSH(sc);
8163 		delay(10);
8164 	}
8165 }
8166 
8167 static uint32_t
8168 wm_i82543_mii_recvbits(struct wm_softc *sc)
8169 {
8170 	uint32_t v, i, data = 0;
8171 
8172 	v = CSR_READ(sc, WMREG_CTRL);
8173 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8174 	v |= CTRL_SWDPIO(3);
8175 
8176 	CSR_WRITE(sc, WMREG_CTRL, v);
8177 	CSR_WRITE_FLUSH(sc);
8178 	delay(10);
8179 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8180 	CSR_WRITE_FLUSH(sc);
8181 	delay(10);
8182 	CSR_WRITE(sc, WMREG_CTRL, v);
8183 	CSR_WRITE_FLUSH(sc);
8184 	delay(10);
8185 
8186 	for (i = 0; i < 16; i++) {
8187 		data <<= 1;
8188 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8189 		CSR_WRITE_FLUSH(sc);
8190 		delay(10);
8191 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8192 			data |= 1;
8193 		CSR_WRITE(sc, WMREG_CTRL, v);
8194 		CSR_WRITE_FLUSH(sc);
8195 		delay(10);
8196 	}
8197 
8198 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8199 	CSR_WRITE_FLUSH(sc);
8200 	delay(10);
8201 	CSR_WRITE(sc, WMREG_CTRL, v);
8202 	CSR_WRITE_FLUSH(sc);
8203 	delay(10);
8204 
8205 	return data;
8206 }
8207 
8208 #undef MDI_IO
8209 #undef MDI_DIR
8210 #undef MDI_CLK
8211 
8212 /*
8213  * wm_gmii_i82543_readreg:	[mii interface function]
8214  *
8215  *	Read a PHY register on the GMII (i82543 version).
8216  */
8217 static int
8218 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8219 {
8220 	struct wm_softc *sc = device_private(self);
8221 	int rv;
8222 
8223 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8224 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8225 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8226 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8227 
8228 	DPRINTF(WM_DEBUG_GMII,
8229 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8230 	    device_xname(sc->sc_dev), phy, reg, rv));
8231 
8232 	return rv;
8233 }
8234 
8235 /*
8236  * wm_gmii_i82543_writereg:	[mii interface function]
8237  *
8238  *	Write a PHY register on the GMII (i82543 version).
8239  */
8240 static void
8241 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8242 {
8243 	struct wm_softc *sc = device_private(self);
8244 
8245 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8246 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8247 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8248 	    (MII_COMMAND_START << 30), 32);
8249 }
8250 
8251 /*
8252  * wm_gmii_i82544_readreg:	[mii interface function]
8253  *
8254  *	Read a PHY register on the GMII.
8255  */
8256 static int
8257 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8258 {
8259 	struct wm_softc *sc = device_private(self);
8260 	uint32_t mdic = 0;
8261 	int i, rv;
8262 
8263 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8264 	    MDIC_REGADD(reg));
8265 
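	/*
	 * Poll for MDIC_READY; with a 50us delay per iteration this
	 * gives the MDI transaction roughly WM_GEN_POLL_TIMEOUT * 3 * 50us
	 * to complete.
	 */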
8266 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8267 		mdic = CSR_READ(sc, WMREG_MDIC);
8268 		if (mdic & MDIC_READY)
8269 			break;
8270 		delay(50);
8271 	}
8272 
8273 	if ((mdic & MDIC_READY) == 0) {
8274 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8275 		    device_xname(sc->sc_dev), phy, reg);
8276 		rv = 0;
8277 	} else if (mdic & MDIC_E) {
8278 #if 0 /* This is normal if no PHY is present. */
8279 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8280 		    device_xname(sc->sc_dev), phy, reg);
8281 #endif
8282 		rv = 0;
8283 	} else {
8284 		rv = MDIC_DATA(mdic);
8285 		if (rv == 0xffff)
8286 			rv = 0;
8287 	}
8288 
8289 	return rv;
8290 }
8291 
8292 /*
8293  * wm_gmii_i82544_writereg:	[mii interface function]
8294  *
8295  *	Write a PHY register on the GMII.
8296  */
8297 static void
8298 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8299 {
8300 	struct wm_softc *sc = device_private(self);
8301 	uint32_t mdic = 0;
8302 	int i;
8303 
8304 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8305 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8306 
8307 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8308 		mdic = CSR_READ(sc, WMREG_MDIC);
8309 		if (mdic & MDIC_READY)
8310 			break;
8311 		delay(50);
8312 	}
8313 
8314 	if ((mdic & MDIC_READY) == 0)
8315 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8316 		    device_xname(sc->sc_dev), phy, reg);
8317 	else if (mdic & MDIC_E)
8318 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8319 		    device_xname(sc->sc_dev), phy, reg);
8320 }
8321 
8322 /*
8323  * wm_gmii_i80003_readreg:	[mii interface function]
8324  *
 *	Read a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8328  */
8329 static int
8330 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8331 {
8332 	struct wm_softc *sc = device_private(self);
8333 	int sem;
8334 	int rv;
8335 
8336 	if (phy != 1) /* only one PHY on kumeran bus */
8337 		return 0;
8338 
8339 	sem = swfwphysem[sc->sc_funcid];
8340 	if (wm_get_swfw_semaphore(sc, sem)) {
8341 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8342 		    __func__);
8343 		return 0;
8344 	}
8345 
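	/*
	 * GG82563 registers are paged; the page number is carried in the
	 * upper bits of 'reg'.  Registers at or above GG82563_MIN_ALT_REG
	 * must be selected through the alternate page-select register.
	 */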
8346 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8347 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8348 		    reg >> GG82563_PAGE_SHIFT);
8349 	} else {
8350 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8351 		    reg >> GG82563_PAGE_SHIFT);
8352 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
8354 	delay(200);
8355 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8356 	delay(200);
8357 
8358 	wm_put_swfw_semaphore(sc, sem);
8359 	return rv;
8360 }
8361 
8362 /*
8363  * wm_gmii_i80003_writereg:	[mii interface function]
8364  *
 *	Write a PHY register on the Kumeran bus (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8368  */
8369 static void
8370 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8371 {
8372 	struct wm_softc *sc = device_private(self);
8373 	int sem;
8374 
8375 	if (phy != 1) /* only one PHY on kumeran bus */
8376 		return;
8377 
8378 	sem = swfwphysem[sc->sc_funcid];
8379 	if (wm_get_swfw_semaphore(sc, sem)) {
8380 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8381 		    __func__);
8382 		return;
8383 	}
8384 
8385 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8386 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8387 		    reg >> GG82563_PAGE_SHIFT);
8388 	} else {
8389 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8390 		    reg >> GG82563_PAGE_SHIFT);
8391 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
8393 	delay(200);
8394 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8395 	delay(200);
8396 
8397 	wm_put_swfw_semaphore(sc, sem);
8398 }
8399 
8400 /*
8401  * wm_gmii_bm_readreg:	[mii interface function]
8402  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8406  */
8407 static int
8408 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8409 {
8410 	struct wm_softc *sc = device_private(self);
8411 	int sem;
8412 	int rv;
8413 
8414 	sem = swfwphysem[sc->sc_funcid];
8415 	if (wm_get_swfw_semaphore(sc, sem)) {
8416 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8417 		    __func__);
8418 		return 0;
8419 	}
8420 
8421 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8422 		if (phy == 1)
8423 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8424 			    reg);
8425 		else
8426 			wm_gmii_i82544_writereg(self, phy,
8427 			    GG82563_PHY_PAGE_SELECT,
8428 			    reg >> GG82563_PAGE_SHIFT);
8429 	}
8430 
8431 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8432 	wm_put_swfw_semaphore(sc, sem);
8433 	return rv;
8434 }
8435 
8436 /*
8437  * wm_gmii_bm_writereg:	[mii interface function]
8438  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8442  */
8443 static void
8444 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8445 {
8446 	struct wm_softc *sc = device_private(self);
8447 	int sem;
8448 
8449 	sem = swfwphysem[sc->sc_funcid];
8450 	if (wm_get_swfw_semaphore(sc, sem)) {
8451 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8452 		    __func__);
8453 		return;
8454 	}
8455 
8456 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8457 		if (phy == 1)
8458 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
8459 			    reg);
8460 		else
8461 			wm_gmii_i82544_writereg(self, phy,
8462 			    GG82563_PHY_PAGE_SELECT,
8463 			    reg >> GG82563_PAGE_SHIFT);
8464 	}
8465 
8466 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8467 	wm_put_swfw_semaphore(sc, sem);
8468 }
8469 
8470 static void
8471 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8472 {
8473 	struct wm_softc *sc = device_private(self);
8474 	uint16_t regnum = BM_PHY_REG_NUM(offset);
8475 	uint16_t wuce;
8476 
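	/*
	 * Access to the wakeup registers on page 800 is a multi-step
	 * sequence: enable host wakeup-register access via page 769
	 * (BM_WUC_ENABLE_REG), select page 800, write the register number
	 * to the address opcode and transfer the value via the data
	 * opcode, then restore the original page-769 enable bits.
	 */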
8477 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
8478 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
8480 	}
8481 
8482 	/* Set page 769 */
8483 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8484 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8485 
8486 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8487 
8488 	wuce &= ~BM_WUC_HOST_WU_BIT;
8489 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8490 	    wuce | BM_WUC_ENABLE_BIT);
8491 
8492 	/* Select page 800 */
8493 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8494 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8495 
8496 	/* Write page 800 */
8497 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8498 
8499 	if (rd)
8500 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8501 	else
8502 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8503 
8504 	/* Set page 769 */
8505 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8506 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8507 
8508 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8509 }
8510 
8511 /*
8512  * wm_gmii_hv_readreg:	[mii interface function]
8513  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8517  */
8518 static int
8519 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8520 {
8521 	struct wm_softc *sc = device_private(self);
8522 	uint16_t page = BM_PHY_REG_PAGE(reg);
8523 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8524 	uint16_t val;
8525 	int rv;
8526 
8527 	if (wm_get_swfwhw_semaphore(sc)) {
8528 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8529 		    __func__);
8530 		return 0;
8531 	}
8532 
8533 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8534 	if (sc->sc_phytype == WMPHY_82577) {
8535 		/* XXX must write */
8536 	}
8537 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return 0;
	}
8552 
8553 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8554 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8555 		    page << BME1000_PAGE_SHIFT);
8556 	}
8557 
8558 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8559 	wm_put_swfwhw_semaphore(sc);
8560 	return rv;
8561 }
8562 
8563 /*
8564  * wm_gmii_hv_writereg:	[mii interface function]
8565  *
 *	Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8569  */
8570 static void
8571 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8572 {
8573 	struct wm_softc *sc = device_private(self);
8574 	uint16_t page = BM_PHY_REG_PAGE(reg);
8575 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8576 
8577 	if (wm_get_swfwhw_semaphore(sc)) {
8578 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8579 		    __func__);
8580 		return;
8581 	}
8582 
8583 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8584 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return;
	}

	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);	/* Don't leak the semaphore */
		return;
	}
8602 
8603 	/*
8604 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
8605 	 * Power Down (whenever bit 11 of the PHY control register is set)
8606 	 */
8607 
8608 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8609 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8610 		    page << BME1000_PAGE_SHIFT);
8611 	}
8612 
8613 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8614 	wm_put_swfwhw_semaphore(sc);
8615 }
8616 
8617 /*
8618  * wm_gmii_82580_readreg:	[mii interface function]
8619  *
8620  *	Read a PHY register on the 82580 and I350.
8621  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8623  */
8624 static int
8625 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8626 {
8627 	struct wm_softc *sc = device_private(self);
8628 	int sem;
8629 	int rv;
8630 
8631 	sem = swfwphysem[sc->sc_funcid];
8632 	if (wm_get_swfw_semaphore(sc, sem)) {
8633 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8634 		    __func__);
8635 		return 0;
8636 	}
8637 
8638 	rv = wm_gmii_i82544_readreg(self, phy, reg);
8639 
8640 	wm_put_swfw_semaphore(sc, sem);
8641 	return rv;
8642 }
8643 
8644 /*
8645  * wm_gmii_82580_writereg:	[mii interface function]
8646  *
8647  *	Write a PHY register on the 82580 and I350.
8648  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8650  */
8651 static void
8652 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8653 {
8654 	struct wm_softc *sc = device_private(self);
8655 	int sem;
8656 
8657 	sem = swfwphysem[sc->sc_funcid];
8658 	if (wm_get_swfw_semaphore(sc, sem)) {
8659 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8660 		    __func__);
8661 		return;
8662 	}
8663 
8664 	wm_gmii_i82544_writereg(self, phy, reg, val);
8665 
8666 	wm_put_swfw_semaphore(sc, sem);
8667 }
8668 
8669 /*
8670  * wm_gmii_gs40g_readreg:	[mii interface function]
8671  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8675  */
8676 static int
8677 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8678 {
8679 	struct wm_softc *sc = device_private(self);
8680 	int sem;
8681 	int page, offset;
8682 	int rv;
8683 
8684 	/* Acquire semaphore */
8685 	sem = swfwphysem[sc->sc_funcid];
8686 	if (wm_get_swfw_semaphore(sc, sem)) {
8687 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8688 		    __func__);
8689 		return 0;
8690 	}
8691 
8692 	/* Page select */
8693 	page = reg >> GS40G_PAGE_SHIFT;
8694 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8695 
8696 	/* Read reg */
8697 	offset = reg & GS40G_OFFSET_MASK;
8698 	rv = wm_gmii_i82544_readreg(self, phy, offset);
8699 
8700 	wm_put_swfw_semaphore(sc, sem);
8701 	return rv;
8702 }
8703 
8704 /*
8705  * wm_gmii_gs40g_writereg:	[mii interface function]
8706  *
8707  *	Write a PHY register on the I210 and I211.
8708  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8710  */
8711 static void
8712 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8713 {
8714 	struct wm_softc *sc = device_private(self);
8715 	int sem;
8716 	int page, offset;
8717 
8718 	/* Acquire semaphore */
8719 	sem = swfwphysem[sc->sc_funcid];
8720 	if (wm_get_swfw_semaphore(sc, sem)) {
8721 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8722 		    __func__);
8723 		return;
8724 	}
8725 
8726 	/* Page select */
8727 	page = reg >> GS40G_PAGE_SHIFT;
8728 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8729 
8730 	/* Write reg */
8731 	offset = reg & GS40G_OFFSET_MASK;
8732 	wm_gmii_i82544_writereg(self, phy, offset, val);
8733 
8734 	/* Release semaphore */
8735 	wm_put_swfw_semaphore(sc, sem);
8736 }
8737 
8738 /*
8739  * wm_gmii_statchg:	[mii interface function]
8740  *
8741  *	Callback from MII layer when media changes.
8742  */
8743 static void
8744 wm_gmii_statchg(struct ifnet *ifp)
8745 {
8746 	struct wm_softc *sc = ifp->if_softc;
8747 	struct mii_data *mii = &sc->sc_mii;
8748 
8749 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8750 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8751 	sc->sc_fcrtl &= ~FCRTL_XONE;
8752 
8753 	/*
8754 	 * Get flow control negotiation result.
8755 	 */
8756 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8757 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8758 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8759 		mii->mii_media_active &= ~IFM_ETH_FMASK;
8760 	}
8761 
8762 	if (sc->sc_flowflags & IFM_FLOW) {
8763 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8764 			sc->sc_ctrl |= CTRL_TFCE;
8765 			sc->sc_fcrtl |= FCRTL_XONE;
8766 		}
8767 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8768 			sc->sc_ctrl |= CTRL_RFCE;
8769 	}
8770 
8771 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
8772 		DPRINTF(WM_DEBUG_LINK,
8773 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8774 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8775 	} else {
8776 		DPRINTF(WM_DEBUG_LINK,
8777 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8778 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8779 	}
8780 
8781 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8782 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8783 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8784 						 : WMREG_FCRTL, sc->sc_fcrtl);
8785 	if (sc->sc_type == WM_T_80003) {
8786 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8787 		case IFM_1000_T:
8788 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8789 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8790 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8791 			break;
8792 		default:
8793 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8794 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8795 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
8796 			break;
8797 		}
8798 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8799 	}
8800 }
8801 
8802 /*
8803  * wm_kmrn_readreg:
8804  *
8805  *	Read a kumeran register
8806  */
8807 static int
8808 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8809 {
8810 	int rv;
8811 
8812 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8813 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8814 			aprint_error_dev(sc->sc_dev,
8815 			    "%s: failed to get semaphore\n", __func__);
8816 			return 0;
8817 		}
8818 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8819 		if (wm_get_swfwhw_semaphore(sc)) {
8820 			aprint_error_dev(sc->sc_dev,
8821 			    "%s: failed to get semaphore\n", __func__);
8822 			return 0;
8823 		}
8824 	}
8825 
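	/*
	 * A Kumeran read is two steps: write the register offset with
	 * KUMCTRLSTA_REN set to start the read, wait 2us for the
	 * transaction to finish, then read the data back from the same
	 * register.
	 */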
8826 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8827 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8828 	    KUMCTRLSTA_REN);
8829 	CSR_WRITE_FLUSH(sc);
8830 	delay(2);
8831 
8832 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8833 
8834 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8835 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8836 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8837 		wm_put_swfwhw_semaphore(sc);
8838 
8839 	return rv;
8840 }
8841 
8842 /*
8843  * wm_kmrn_writereg:
8844  *
8845  *	Write a kumeran register
8846  */
8847 static void
8848 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8849 {
8850 
8851 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8852 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8853 			aprint_error_dev(sc->sc_dev,
8854 			    "%s: failed to get semaphore\n", __func__);
8855 			return;
8856 		}
8857 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8858 		if (wm_get_swfwhw_semaphore(sc)) {
8859 			aprint_error_dev(sc->sc_dev,
8860 			    "%s: failed to get semaphore\n", __func__);
8861 			return;
8862 		}
8863 	}
8864 
8865 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8866 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8867 	    (val & KUMCTRLSTA_MASK));
8868 
8869 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8870 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8871 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8872 		wm_put_swfwhw_semaphore(sc);
8873 }
8874 
8875 /* SGMII related */
8876 
8877 /*
8878  * wm_sgmii_uses_mdio
8879  *
8880  * Check whether the transaction is to the internal PHY or the external
8881  * MDIO interface. Return true if it's MDIO.
8882  */
8883 static bool
8884 wm_sgmii_uses_mdio(struct wm_softc *sc)
8885 {
8886 	uint32_t reg;
8887 	bool ismdio = false;
8888 
8889 	switch (sc->sc_type) {
8890 	case WM_T_82575:
8891 	case WM_T_82576:
8892 		reg = CSR_READ(sc, WMREG_MDIC);
8893 		ismdio = ((reg & MDIC_DEST) != 0);
8894 		break;
8895 	case WM_T_82580:
8896 	case WM_T_I350:
8897 	case WM_T_I354:
8898 	case WM_T_I210:
8899 	case WM_T_I211:
8900 		reg = CSR_READ(sc, WMREG_MDICNFG);
8901 		ismdio = ((reg & MDICNFG_DEST) != 0);
8902 		break;
8903 	default:
8904 		break;
8905 	}
8906 
8907 	return ismdio;
8908 }
8909 
8910 /*
8911  * wm_sgmii_readreg:	[mii interface function]
8912  *
8913  *	Read a PHY register on the SGMII
8914  * This could be handled by the PHY layer if we didn't have to lock the
8915  * resource ...
8916  */
8917 static int
8918 wm_sgmii_readreg(device_t self, int phy, int reg)
8919 {
8920 	struct wm_softc *sc = device_private(self);
8921 	uint32_t i2ccmd;
8922 	int i, rv;
8923 
8924 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8925 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8926 		    __func__);
8927 		return 0;
8928 	}
8929 
8930 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8931 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
8932 	    | I2CCMD_OPCODE_READ;
8933 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8934 
8935 	/* Poll the ready bit */
8936 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8937 		delay(50);
8938 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8939 		if (i2ccmd & I2CCMD_READY)
8940 			break;
8941 	}
8942 	if ((i2ccmd & I2CCMD_READY) == 0)
8943 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8944 	if ((i2ccmd & I2CCMD_ERROR) != 0)
8945 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8946 
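	/*
	 * Swap the data bytes back to host order; a sketch of the effect:
	 * a raw I2C read of 0x3412 in the low 16 bits yields rv == 0x1234.
	 */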
8947 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8948 
8949 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8950 	return rv;
8951 }
8952 
8953 /*
8954  * wm_sgmii_writereg:	[mii interface function]
8955  *
8956  *	Write a PHY register on the SGMII.
8957  * This could be handled by the PHY layer if we didn't have to lock the
8958  * resource ...
8959  */
8960 static void
8961 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
8962 {
8963 	struct wm_softc *sc = device_private(self);
8964 	uint32_t i2ccmd;
8965 	int i;
8966 	int val_swapped;
8967 
8968 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8969 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8970 		    __func__);
8971 		return;
8972 	}
8973 	/* Swap the data bytes for the I2C interface */
8974 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
8975 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8976 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
8977 	    | I2CCMD_OPCODE_WRITE | val_swapped;
8978 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8979 
8980 	/* Poll the ready bit */
8981 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8982 		delay(50);
8983 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8984 		if (i2ccmd & I2CCMD_READY)
8985 			break;
8986 	}
8987 	if ((i2ccmd & I2CCMD_READY) == 0)
8988 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
8989 	if ((i2ccmd & I2CCMD_ERROR) != 0)
8990 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8991 
8992 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8993 }
8994 
8995 /* TBI related */
8996 
8997 /*
8998  * wm_tbi_mediainit:
8999  *
9000  *	Initialize media for use on 1000BASE-X devices.
9001  */
9002 static void
9003 wm_tbi_mediainit(struct wm_softc *sc)
9004 {
9005 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9006 	const char *sep = "";
9007 
9008 	if (sc->sc_type < WM_T_82543)
9009 		sc->sc_tipg = TIPG_WM_DFLT;
9010 	else
9011 		sc->sc_tipg = TIPG_LG_DFLT;
9012 
9013 	sc->sc_tbi_serdes_anegticks = 5;
9014 
9015 	/* Initialize our media structures */
9016 	sc->sc_mii.mii_ifp = ifp;
9017 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9018 
9019 	if ((sc->sc_type >= WM_T_82575)
9020 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9021 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9022 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9023 	else
9024 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9025 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9026 
9027 	/*
9028 	 * SWD Pins:
9029 	 *
9030 	 *	0 = Link LED (output)
9031 	 *	1 = Loss Of Signal (input)
9032 	 */
9033 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9034 
9035 	/* XXX Perhaps this is only for TBI */
9036 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9037 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9038 
9039 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9040 		sc->sc_ctrl &= ~CTRL_LRST;
9041 
9042 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9043 
9044 #define	ADD(ss, mm, dd)							\
9045 do {									\
9046 	aprint_normal("%s%s", sep, ss);					\
9047 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
9048 	sep = ", ";							\
9049 } while (/*CONSTCOND*/0)
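/*
 * A sketch of one expansion: ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD)
 * prints "auto" (prefixed with ", " after the first entry) and then calls
 * ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
 * ANAR_X_FD|ANAR_X_HD, NULL).
 */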
9050 
9051 	aprint_normal_dev(sc->sc_dev, "");
9052 
9053 	/* Only 82545 is LX */
9054 	if (sc->sc_type == WM_T_82545) {
9055 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9056 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
9057 	} else {
9058 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9059 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
9060 	}
9061 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
9062 	aprint_normal("\n");
9063 
9064 #undef ADD
9065 
9066 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9067 }
9068 
9069 /*
9070  * wm_tbi_mediachange:	[ifmedia interface function]
9071  *
9072  *	Set hardware to newly-selected media on a 1000BASE-X device.
9073  */
9074 static int
9075 wm_tbi_mediachange(struct ifnet *ifp)
9076 {
9077 	struct wm_softc *sc = ifp->if_softc;
9078 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9079 	uint32_t status;
9080 	int i;
9081 
9082 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9083 		/* XXX need some work for >= 82571 and < 82575 */
9084 		if (sc->sc_type < WM_T_82575)
9085 			return 0;
9086 	}
9087 
9088 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9089 	    || (sc->sc_type >= WM_T_82575))
9090 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9091 
9092 	sc->sc_ctrl &= ~CTRL_LRST;
9093 	sc->sc_txcw = TXCW_ANE;
9094 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9095 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9096 	else if (ife->ifm_media & IFM_FDX)
9097 		sc->sc_txcw |= TXCW_FD;
9098 	else
9099 		sc->sc_txcw |= TXCW_HD;
9100 
9101 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9102 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9103 
9104 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9105 		    device_xname(sc->sc_dev), sc->sc_txcw));
9106 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9107 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9108 	CSR_WRITE_FLUSH(sc);
9109 	delay(1000);
9110 
9111 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9112 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9113 
9114 	/*
9115 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit reads 1 when
9116 	 * the optics detect a signal; on older chips the sense is inverted.
9117 	 */
9118 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9119 		/* Have signal; wait for the link to come up. */
9120 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9121 			delay(10000);
9122 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9123 				break;
9124 		}
9125 
9126 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9127 			    device_xname(sc->sc_dev),i));
9128 
9129 		status = CSR_READ(sc, WMREG_STATUS);
9130 		DPRINTF(WM_DEBUG_LINK,
9131 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9132 			device_xname(sc->sc_dev),status, STATUS_LU));
9133 		if (status & STATUS_LU) {
9134 			/* Link is up. */
9135 			DPRINTF(WM_DEBUG_LINK,
9136 			    ("%s: LINK: set media -> link up %s\n",
9137 			    device_xname(sc->sc_dev),
9138 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9139 
9140 			/*
9141 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9142 			 * so we should update sc->sc_ctrl
9143 			 */
9144 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9145 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9146 			sc->sc_fcrtl &= ~FCRTL_XONE;
9147 			if (status & STATUS_FD)
9148 				sc->sc_tctl |=
9149 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9150 			else
9151 				sc->sc_tctl |=
9152 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9153 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9154 				sc->sc_fcrtl |= FCRTL_XONE;
9155 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9156 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9157 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9158 				      sc->sc_fcrtl);
9159 			sc->sc_tbi_linkup = 1;
9160 		} else {
9161 			if (i == WM_LINKUP_TIMEOUT)
9162 				wm_check_for_link(sc);
9163 			/* Link is down. */
9164 			DPRINTF(WM_DEBUG_LINK,
9165 			    ("%s: LINK: set media -> link down\n",
9166 			    device_xname(sc->sc_dev)));
9167 			sc->sc_tbi_linkup = 0;
9168 		}
9169 	} else {
9170 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9171 		    device_xname(sc->sc_dev)));
9172 		sc->sc_tbi_linkup = 0;
9173 	}
9174 
9175 	wm_tbi_serdes_set_linkled(sc);
9176 
9177 	return 0;
9178 }
9179 
9180 /*
9181  * wm_tbi_mediastatus:	[ifmedia interface function]
9182  *
9183  *	Get the current interface media status on a 1000BASE-X device.
9184  */
9185 static void
9186 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9187 {
9188 	struct wm_softc *sc = ifp->if_softc;
9189 	uint32_t ctrl, status;
9190 
9191 	ifmr->ifm_status = IFM_AVALID;
9192 	ifmr->ifm_active = IFM_ETHER;
9193 
9194 	status = CSR_READ(sc, WMREG_STATUS);
9195 	if ((status & STATUS_LU) == 0) {
9196 		ifmr->ifm_active |= IFM_NONE;
9197 		return;
9198 	}
9199 
9200 	ifmr->ifm_status |= IFM_ACTIVE;
9201 	/* Only 82545 is LX */
9202 	if (sc->sc_type == WM_T_82545)
9203 		ifmr->ifm_active |= IFM_1000_LX;
9204 	else
9205 		ifmr->ifm_active |= IFM_1000_SX;
9206 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9207 		ifmr->ifm_active |= IFM_FDX;
9208 	else
9209 		ifmr->ifm_active |= IFM_HDX;
9210 	ctrl = CSR_READ(sc, WMREG_CTRL);
9211 	if (ctrl & CTRL_RFCE)
9212 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9213 	if (ctrl & CTRL_TFCE)
9214 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9215 }
9216 
9217 /* XXX TBI only */
9218 static int
9219 wm_check_for_link(struct wm_softc *sc)
9220 {
9221 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9222 	uint32_t rxcw;
9223 	uint32_t ctrl;
9224 	uint32_t status;
9225 	uint32_t sig;
9226 
9227 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9228 		/* XXX need some work for >= 82571 */
9229 		if (sc->sc_type >= WM_T_82571) {
9230 			sc->sc_tbi_linkup = 1;
9231 			return 0;
9232 		}
9233 	}
9234 
9235 	rxcw = CSR_READ(sc, WMREG_RXCW);
9236 	ctrl = CSR_READ(sc, WMREG_CTRL);
9237 	status = CSR_READ(sc, WMREG_STATUS);
9238 
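	/*
	 * Expected value of the signal-detect pin: on chips newer than the
	 * 82544 the pin reads 1 when a signal is present; on older chips
	 * the sense is inverted, so a signal reads as 0.
	 */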
9239 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9240 
9241 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9242 		device_xname(sc->sc_dev), __func__,
9243 		((ctrl & CTRL_SWDPIN(1)) == sig),
9244 		((status & STATUS_LU) != 0),
9245 		((rxcw & RXCW_C) != 0)
9246 		    ));
9247 
9248 	/*
9249 	 * SWDPIN   LU RXCW
9250 	 *      0    0    0
9251 	 *      0    0    1	(should not happen)
9252 	 *      0    1    0	(should not happen)
9253 	 *      0    1    1	(should not happen)
9254 	 *      1    0    0	Disable autonego and force linkup
9255 	 *      1    0    1	got /C/ but not linkup yet
9256 	 *      1    1    0	(linkup)
9257 	 *      1    1    1	If IFM_AUTO, back to autonego
9258 	 *
9259 	 */
9260 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9261 	    && ((status & STATUS_LU) == 0)
9262 	    && ((rxcw & RXCW_C) == 0)) {
9263 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9264 			__func__));
9265 		sc->sc_tbi_linkup = 0;
9266 		/* Disable auto-negotiation in the TXCW register */
9267 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9268 
9269 		/*
9270 		 * Force link-up and also force full-duplex.
9271 		 *
9272 		 * NOTE: CTRL will update TFCE and RFCE automatically,
9273 		 * so we should update sc->sc_ctrl
9274 		 */
9275 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9276 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9277 	} else if (((status & STATUS_LU) != 0)
9278 	    && ((rxcw & RXCW_C) != 0)
9279 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9280 		sc->sc_tbi_linkup = 1;
9281 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9282 			__func__));
9283 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9284 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9285 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9286 	    && ((rxcw & RXCW_C) != 0)) {
9287 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9288 	} else {
9289 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9290 			status));
9291 	}
9292 
9293 	return 0;
9294 }
9295 
9296 /*
9297  * wm_tbi_tick:
9298  *
9299  *	Check the link on TBI devices.
9300  *	This function acts as mii_tick().
9301  */
9302 static void
9303 wm_tbi_tick(struct wm_softc *sc)
9304 {
9305 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
9306 	struct mii_data *mii = &sc->sc_mii;
9307 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9308 	uint32_t status;
9309 
9310 	KASSERT(WM_TX_LOCKED(txq));
9311 
9312 	status = CSR_READ(sc, WMREG_STATUS);
9313 
9314 	/* XXX is this needed? */
9315 	(void)CSR_READ(sc, WMREG_RXCW);
9316 	(void)CSR_READ(sc, WMREG_CTRL);
9317 
9318 	/* set link status */
9319 	if ((status & STATUS_LU) == 0) {
9320 		DPRINTF(WM_DEBUG_LINK,
9321 		    ("%s: LINK: checklink -> down\n",
9322 			device_xname(sc->sc_dev)));
9323 		sc->sc_tbi_linkup = 0;
9324 	} else if (sc->sc_tbi_linkup == 0) {
9325 		DPRINTF(WM_DEBUG_LINK,
9326 		    ("%s: LINK: checklink -> up %s\n",
9327 			device_xname(sc->sc_dev),
9328 			(status & STATUS_FD) ? "FDX" : "HDX"));
9329 		sc->sc_tbi_linkup = 1;
9330 		sc->sc_tbi_serdes_ticks = 0;
9331 	}
9332 
9333 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9334 		goto setled;
9335 
9336 	if ((status & STATUS_LU) == 0) {
9337 		sc->sc_tbi_linkup = 0;
9338 		/* If the timer expired, retry autonegotiation */
9339 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9340 		    && (++sc->sc_tbi_serdes_ticks
9341 			>= sc->sc_tbi_serdes_anegticks)) {
9342 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9343 			sc->sc_tbi_serdes_ticks = 0;
9344 			/*
9345 			 * Reset the link, and let autonegotiation do
9346 			 * its thing
9347 			 */
9348 			sc->sc_ctrl |= CTRL_LRST;
9349 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9350 			CSR_WRITE_FLUSH(sc);
9351 			delay(1000);
9352 			sc->sc_ctrl &= ~CTRL_LRST;
9353 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9354 			CSR_WRITE_FLUSH(sc);
9355 			delay(1000);
9356 			CSR_WRITE(sc, WMREG_TXCW,
9357 			    sc->sc_txcw & ~TXCW_ANE);
9358 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9359 		}
9360 	}
9361 
9362 setled:
9363 	wm_tbi_serdes_set_linkled(sc);
9364 }
9365 
9366 /* SERDES related */
9367 static void
9368 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9369 {
9370 	uint32_t reg;
9371 
9372 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9373 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
9374 		return;
9375 
9376 	reg = CSR_READ(sc, WMREG_PCS_CFG);
9377 	reg |= PCS_CFG_PCS_EN;
9378 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9379 
9380 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
9381 	reg &= ~CTRL_EXT_SWDPIN(3);
9382 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9383 	CSR_WRITE_FLUSH(sc);
9384 }
9385 
9386 static int
9387 wm_serdes_mediachange(struct ifnet *ifp)
9388 {
9389 	struct wm_softc *sc = ifp->if_softc;
9390 	bool pcs_autoneg = true; /* XXX */
9391 	uint32_t ctrl_ext, pcs_lctl, reg;
9392 
9393 	/* XXX Currently, this function is not called on 8257[12] */
9394 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9395 	    || (sc->sc_type >= WM_T_82575))
9396 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9397 
9398 	wm_serdes_power_up_link_82575(sc);
9399 
9400 	sc->sc_ctrl |= CTRL_SLU;
9401 
9402 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9403 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9404 
9405 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9406 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9407 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9408 	case CTRL_EXT_LINK_MODE_SGMII:
9409 		pcs_autoneg = true;
9410 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9411 		break;
9412 	case CTRL_EXT_LINK_MODE_1000KX:
9413 		pcs_autoneg = false;
9414 		/* FALLTHROUGH */
9415 	default:
9416 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
9417 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9418 				pcs_autoneg = false;
9419 		}
9420 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9421 		    | CTRL_FRCFDX;
9422 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9423 	}
9424 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9425 
9426 	if (pcs_autoneg) {
9427 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9428 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9429 
9430 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
9431 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9432 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9433 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9434 	} else
9435 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9436 
9437 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9438 
9439 
9441 }
9442 
9443 static void
9444 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9445 {
9446 	struct wm_softc *sc = ifp->if_softc;
9447 	struct mii_data *mii = &sc->sc_mii;
9448 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9449 	uint32_t pcs_adv, pcs_lpab, reg;
9450 
9451 	ifmr->ifm_status = IFM_AVALID;
9452 	ifmr->ifm_active = IFM_ETHER;
9453 
9454 	/* Check PCS */
9455 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9456 	if ((reg & PCS_LSTS_LINKOK) == 0) {
9457 		ifmr->ifm_active |= IFM_NONE;
9458 		sc->sc_tbi_linkup = 0;
9459 		goto setled;
9460 	}
9461 
9462 	sc->sc_tbi_linkup = 1;
9463 	ifmr->ifm_status |= IFM_ACTIVE;
9464 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9465 	if ((reg & PCS_LSTS_FDX) != 0)
9466 		ifmr->ifm_active |= IFM_FDX;
9467 	else
9468 		ifmr->ifm_active |= IFM_HDX;
9469 	mii->mii_media_active &= ~IFM_ETH_FMASK;
9470 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9471 		/* Check flow */
9472 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9473 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
9474 			printf("XXX LINKOK but not ACOMP\n");
9475 			goto setled;
9476 		}
9477 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9478 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9479 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
9480 		if ((pcs_adv & TXCW_SYM_PAUSE)
9481 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9482 			mii->mii_media_active |= IFM_FLOW
9483 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9484 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9485 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9486 		    && (pcs_lpab & TXCW_SYM_PAUSE)
9487 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9488 			mii->mii_media_active |= IFM_FLOW
9489 			    | IFM_ETH_TXPAUSE;
9490 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
9491 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9492 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9493 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9494 			mii->mii_media_active |= IFM_FLOW
9495 			    | IFM_ETH_RXPAUSE;
9496 		} else {
9497 		}
9498 	}
9499 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9500 	    | (mii->mii_media_active & IFM_ETH_FMASK);
9501 setled:
9502 	wm_tbi_serdes_set_linkled(sc);
9503 }
9504 
9505 /*
9506  * wm_serdes_tick:
9507  *
9508  *	Check the link on serdes devices.
9509  */
9510 static void
9511 wm_serdes_tick(struct wm_softc *sc)
9512 {
9513 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
9514 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9515 	struct mii_data *mii = &sc->sc_mii;
9516 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9517 	uint32_t reg;
9518 
9519 	KASSERT(WM_TX_LOCKED(txq));
9520 
9521 	mii->mii_media_status = IFM_AVALID;
9522 	mii->mii_media_active = IFM_ETHER;
9523 
9524 	/* Check PCS */
9525 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9526 	if ((reg & PCS_LSTS_LINKOK) != 0) {
9527 		mii->mii_media_status |= IFM_ACTIVE;
9528 		sc->sc_tbi_linkup = 1;
9529 		sc->sc_tbi_serdes_ticks = 0;
9530 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
9531 		if ((reg & PCS_LSTS_FDX) != 0)
9532 			mii->mii_media_active |= IFM_FDX;
9533 		else
9534 			mii->mii_media_active |= IFM_HDX;
9535 	} else {
9536 		mii->mii_media_status |= IFM_NONE;
9537 		sc->sc_tbi_linkup = 0;
9538 		/* If the timer expired, retry autonegotiation */
9539 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9540 		    && (++sc->sc_tbi_serdes_ticks
9541 			>= sc->sc_tbi_serdes_anegticks)) {
9542 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9543 			sc->sc_tbi_serdes_ticks = 0;
9544 			/* XXX */
9545 			wm_serdes_mediachange(ifp);
9546 		}
9547 	}
9548 
9549 	wm_tbi_serdes_set_linkled(sc);
9550 }
9551 
9552 /* SFP related */
9553 
9554 static int
9555 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9556 {
9557 	uint32_t i2ccmd;
9558 	int i;
9559 
9560 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9561 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9562 
9563 	/* Poll the ready bit */
9564 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9565 		delay(50);
9566 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9567 		if (i2ccmd & I2CCMD_READY)
9568 			break;
9569 	}
9570 	if ((i2ccmd & I2CCMD_READY) == 0)
9571 		return -1;
9572 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9573 		return -1;
9574 
9575 	*data = i2ccmd & 0x00ff;
9576 
9577 	return 0;
9578 }
9579 
9580 static uint32_t
9581 wm_sfp_get_media_type(struct wm_softc *sc)
9582 {
9583 	uint32_t ctrl_ext;
9584 	uint8_t val = 0;
9585 	int timeout = 3;
9586 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9587 	int rv = -1;
9588 
9589 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9590 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9591 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9592 	CSR_WRITE_FLUSH(sc);
9593 
9594 	/* Read SFP module data */
9595 	while (timeout) {
9596 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9597 		if (rv == 0)
9598 			break;
9599 		delay(100*1000); /* XXX too big */
9600 		timeout--;
9601 	}
9602 	if (rv != 0)
9603 		goto out;
9604 	switch (val) {
9605 	case SFF_SFP_ID_SFF:
9606 		aprint_normal_dev(sc->sc_dev,
9607 		    "Module/Connector soldered to board\n");
9608 		break;
9609 	case SFF_SFP_ID_SFP:
9610 		aprint_normal_dev(sc->sc_dev, "SFP\n");
9611 		break;
9612 	case SFF_SFP_ID_UNKNOWN:
9613 		goto out;
9614 	default:
9615 		break;
9616 	}
9617 
9618 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9619 	if (rv != 0) {
9620 		goto out;
9621 	}
9622 
9623 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9624 		mediatype = WM_MEDIATYPE_SERDES;
9625 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
9626 		sc->sc_flags |= WM_F_SGMII;
9627 		mediatype = WM_MEDIATYPE_COPPER;
9628 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
9629 		sc->sc_flags |= WM_F_SGMII;
9630 		mediatype = WM_MEDIATYPE_SERDES;
9631 	}
9632 
9633 out:
9634 	/* Restore I2C interface setting */
9635 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9636 
9637 	return mediatype;
9638 }
9639 /*
9640  * NVM related.
9641  * Microwire, SPI (w/wo EERD) and Flash.
9642  */
9643 
9644 /* Both spi and uwire */
9645 
9646 /*
9647  * wm_eeprom_sendbits:
9648  *
9649  *	Send a series of bits to the EEPROM.
9650  */
9651 static void
9652 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9653 {
9654 	uint32_t reg;
9655 	int x;
9656 
9657 	reg = CSR_READ(sc, WMREG_EECD);
9658 
9659 	for (x = nbits; x > 0; x--) {
9660 		if (bits & (1U << (x - 1)))
9661 			reg |= EECD_DI;
9662 		else
9663 			reg &= ~EECD_DI;
9664 		CSR_WRITE(sc, WMREG_EECD, reg);
9665 		CSR_WRITE_FLUSH(sc);
9666 		delay(2);
9667 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9668 		CSR_WRITE_FLUSH(sc);
9669 		delay(2);
9670 		CSR_WRITE(sc, WMREG_EECD, reg);
9671 		CSR_WRITE_FLUSH(sc);
9672 		delay(2);
9673 	}
9674 }
9675 
9676 /*
9677  * wm_eeprom_recvbits:
9678  *
9679  *	Receive a series of bits from the EEPROM.
9680  */
9681 static void
9682 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9683 {
9684 	uint32_t reg, val;
9685 	int x;
9686 
9687 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9688 
9689 	val = 0;
9690 	for (x = nbits; x > 0; x--) {
9691 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9692 		CSR_WRITE_FLUSH(sc);
9693 		delay(2);
9694 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9695 			val |= (1U << (x - 1));
9696 		CSR_WRITE(sc, WMREG_EECD, reg);
9697 		CSR_WRITE_FLUSH(sc);
9698 		delay(2);
9699 	}
9700 	*valp = val;
9701 }
9702 
9703 /* Microwire */
9704 
9705 /*
9706  * wm_nvm_read_uwire:
9707  *
9708  *	Read a word from the EEPROM using the MicroWire protocol.
9709  */
9710 static int
9711 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9712 {
9713 	uint32_t reg, val;
9714 	int i;
9715 
9716 	for (i = 0; i < wordcnt; i++) {
9717 		/* Clear SK and DI. */
9718 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9719 		CSR_WRITE(sc, WMREG_EECD, reg);
9720 
9721 		/*
9722 		 * XXX: workaround for a bug in qemu-0.12.x and prior
9723 		 * and Xen.
9724 		 *
9725 		 * We use this workaround only for 82540 because qemu's
9726 		 * e1000 act as 82540.
9727 		 */
9728 		if (sc->sc_type == WM_T_82540) {
9729 			reg |= EECD_SK;
9730 			CSR_WRITE(sc, WMREG_EECD, reg);
9731 			reg &= ~EECD_SK;
9732 			CSR_WRITE(sc, WMREG_EECD, reg);
9733 			CSR_WRITE_FLUSH(sc);
9734 			delay(2);
9735 		}
9736 		/* XXX: end of workaround */
9737 
9738 		/* Set CHIP SELECT. */
9739 		reg |= EECD_CS;
9740 		CSR_WRITE(sc, WMREG_EECD, reg);
9741 		CSR_WRITE_FLUSH(sc);
9742 		delay(2);
9743 
9744 		/* Shift in the READ command. */
9745 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9746 
9747 		/* Shift in address. */
9748 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9749 
9750 		/* Shift out the data. */
9751 		wm_eeprom_recvbits(sc, &val, 16);
9752 		data[i] = val & 0xffff;
9753 
9754 		/* Clear CHIP SELECT. */
9755 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9756 		CSR_WRITE(sc, WMREG_EECD, reg);
9757 		CSR_WRITE_FLUSH(sc);
9758 		delay(2);
9759 	}
9760 
9761 	return 0;
9762 }
9763 
9764 /* SPI */
9765 
9766 /*
9767  * Set SPI and FLASH related information from the EECD register.
9768  * For 82541 and 82547, the word size is taken from EEPROM.
9769  */
9770 static int
9771 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9772 {
9773 	int size;
9774 	uint32_t reg;
9775 	uint16_t data;
9776 
9777 	reg = CSR_READ(sc, WMREG_EECD);
9778 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9779 
9780 	/* Read the size of NVM from EECD by default */
9781 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9782 	switch (sc->sc_type) {
9783 	case WM_T_82541:
9784 	case WM_T_82541_2:
9785 	case WM_T_82547:
9786 	case WM_T_82547_2:
9787 		/* Set dummy value to access EEPROM */
9788 		sc->sc_nvm_wordsize = 64;
9789 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9790 		reg = data;
9791 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9792 		if (size == 0)
9793 			size = 6; /* 64 word size */
9794 		else
9795 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9796 		break;
9797 	case WM_T_80003:
9798 	case WM_T_82571:
9799 	case WM_T_82572:
9800 	case WM_T_82573: /* SPI case */
9801 	case WM_T_82574: /* SPI case */
9802 	case WM_T_82583: /* SPI case */
9803 		size += NVM_WORD_SIZE_BASE_SHIFT;
9804 		if (size > 14)
9805 			size = 14;
9806 		break;
9807 	case WM_T_82575:
9808 	case WM_T_82576:
9809 	case WM_T_82580:
9810 	case WM_T_I350:
9811 	case WM_T_I354:
9812 	case WM_T_I210:
9813 	case WM_T_I211:
9814 		size += NVM_WORD_SIZE_BASE_SHIFT;
9815 		if (size > 15)
9816 			size = 15;
9817 		break;
9818 	default:
9819 		aprint_error_dev(sc->sc_dev,
9820 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9821 		return -1;
9822 		break;
9823 	}
9824 
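	/*
	 * Worked example (a sketch, assuming NVM_WORD_SIZE_BASE_SHIFT is 6):
	 * an EECD size field of 2 makes size 8, so the NVM word size set
	 * below becomes 1 << 8 == 256 words.
	 */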
9825 	sc->sc_nvm_wordsize = 1 << size;
9826 
9827 	return 0;
9828 }
9829 
9830 /*
9831  * wm_nvm_ready_spi:
9832  *
9833  *	Wait for a SPI EEPROM to be ready for commands.
9834  */
9835 static int
9836 wm_nvm_ready_spi(struct wm_softc *sc)
9837 {
9838 	uint32_t val;
9839 	int usec;
9840 
9841 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9842 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9843 		wm_eeprom_recvbits(sc, &val, 8);
9844 		if ((val & SPI_SR_RDY) == 0)
9845 			break;
9846 	}
9847 	if (usec >= SPI_MAX_RETRIES) {
9848 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
9849 		return 1;
9850 	}
9851 	return 0;
9852 }
9853 
9854 /*
9855  * wm_nvm_read_spi:
9856  *
9857  *	Read a word from the EEPROM using the SPI protocol.
9858  */
9859 static int
9860 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9861 {
9862 	uint32_t reg, val;
9863 	int i;
9864 	uint8_t opc;
9865 
9866 	/* Clear SK and CS. */
9867 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9868 	CSR_WRITE(sc, WMREG_EECD, reg);
9869 	CSR_WRITE_FLUSH(sc);
9870 	delay(2);
9871 
9872 	if (wm_nvm_ready_spi(sc))
9873 		return 1;
9874 
9875 	/* Toggle CS to flush commands. */
9876 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9877 	CSR_WRITE_FLUSH(sc);
9878 	delay(2);
9879 	CSR_WRITE(sc, WMREG_EECD, reg);
9880 	CSR_WRITE_FLUSH(sc);
9881 	delay(2);
9882 
9883 	opc = SPI_OPC_READ;
9884 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
9885 		opc |= SPI_OPC_A8;
9886 
9887 	wm_eeprom_sendbits(sc, opc, 8);
9888 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9889 
9890 	for (i = 0; i < wordcnt; i++) {
9891 		wm_eeprom_recvbits(sc, &val, 16);
9892 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9893 	}
9894 
9895 	/* Raise CS and clear SK. */
9896 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9897 	CSR_WRITE(sc, WMREG_EECD, reg);
9898 	CSR_WRITE_FLUSH(sc);
9899 	delay(2);
9900 
9901 	return 0;
9902 }
9903 
9904 /* Using with EERD */
9905 
9906 static int
9907 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9908 {
9909 	uint32_t attempts = 100000;
9910 	uint32_t i, reg = 0;
9911 	int32_t done = -1;
9912 
9913 	for (i = 0; i < attempts; i++) {
9914 		reg = CSR_READ(sc, rw);
9915 
9916 		if (reg & EERD_DONE) {
9917 			done = 0;
9918 			break;
9919 		}
9920 		delay(5);
9921 	}
9922 
9923 	return done;
9924 }
9925 
9926 static int
9927 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9928     uint16_t *data)
9929 {
9930 	int i, eerd = 0;
9931 	int error = 0;
9932 
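	/*
	 * Each word is fetched by writing its address plus the START bit
	 * into EERD and polling for DONE; the data then appears in the
	 * high bits of the same register.
	 */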
9933 	for (i = 0; i < wordcnt; i++) {
9934 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9935 
9936 		CSR_WRITE(sc, WMREG_EERD, eerd);
9937 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9938 		if (error != 0)
9939 			break;
9940 
9941 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9942 	}
9943 
9944 	return error;
9945 }
9946 
9947 /* Flash */
9948 
9949 static int
9950 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9951 {
9952 	uint32_t eecd;
9953 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9954 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9955 	uint8_t sig_byte = 0;
9956 
9957 	switch (sc->sc_type) {
9958 	case WM_T_ICH8:
9959 	case WM_T_ICH9:
9960 		eecd = CSR_READ(sc, WMREG_EECD);
9961 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
9962 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
9963 			return 0;
9964 		}
9965 		/* FALLTHROUGH */
9966 	default:
9967 		/* Default to 0 */
9968 		*bank = 0;
9969 
9970 		/* Check bank 0 */
9971 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
9972 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9973 			*bank = 0;
9974 			return 0;
9975 		}
9976 
9977 		/* Check bank 1 */
9978 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
9979 		    &sig_byte);
9980 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
9981 			*bank = 1;
9982 			return 0;
9983 		}
9984 	}
9985 
9986 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
9987 		device_xname(sc->sc_dev)));
9988 	return -1;
9989 }
9990 
9991 /******************************************************************************
9992  * This function does initial flash setup so that a new read/write/erase cycle
9993  * can be started.
9994  *
9995  * sc - The pointer to the hw structure
9996  ****************************************************************************/
9997 static int32_t
9998 wm_ich8_cycle_init(struct wm_softc *sc)
9999 {
10000 	uint16_t hsfsts;
10001 	int32_t error = 1;
10002 	int32_t i     = 0;
10003 
10004 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10005 
10006 	/* Check that the Flash Descriptor Valid bit is set in the HW status */
10007 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10008 		return error;
10009 	}
10010 
10011 	/* Clear FCERR and DAEL in the HW status by writing 1 to each */
10013 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10014 
10015 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10016 
10017 	/*
10018 	 * Either we should have a hardware SPI cycle-in-progress bit to check
10019 	 * against in order to start a new cycle, or the FDONE bit should be
10020 	 * changed in the hardware so that it is 1 after a hardware reset,
10021 	 * which could then be used to tell whether a cycle is in progress or
10022 	 * has completed.  We should also have some software semaphore
10023 	 * mechanism to guard FDONE or the cycle-in-progress bit so that
10024 	 * access by two threads is serialized, or some way to keep two
10025 	 * threads from starting a cycle at the same time.
10026 	 */
10027 
10028 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10029 		/*
10030 		 * There is no cycle running at present, so we can start a
10031 		 * cycle
10032 		 */
10033 
10034 		/* Begin by setting Flash Cycle Done. */
10035 		hsfsts |= HSFSTS_DONE;
10036 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10037 		error = 0;
10038 	} else {
10039 		/*
10040 		 * Otherwise, poll for some time so the current cycle has a
10041 		 * chance to end before giving up.
10042 		 */
10043 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10044 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10045 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10046 				error = 0;
10047 				break;
10048 			}
10049 			delay(1);
10050 		}
10051 		if (error == 0) {
10052 			/*
10053 			 * The previous cycle completed while we waited;
10054 			 * now set the Flash Cycle Done bit.
10055 			 */
10056 			hsfsts |= HSFSTS_DONE;
10057 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10058 		}
10059 	}
10060 	return error;
10061 }
10062 
10063 /******************************************************************************
10064  * This function starts a flash cycle and waits for its completion
10065  *
10066  * sc - The pointer to the hw structure
10067  ****************************************************************************/
10068 static int32_t
10069 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10070 {
10071 	uint16_t hsflctl;
10072 	uint16_t hsfsts;
10073 	int32_t error = 1;
10074 	uint32_t i = 0;
10075 
10076 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10077 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10078 	hsflctl |= HSFCTL_GO;
10079 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10080 
10081 	/* Wait till FDONE bit is set to 1 */
10082 	do {
10083 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10084 		if (hsfsts & HSFSTS_DONE)
10085 			break;
10086 		delay(1);
10087 		i++;
10088 	} while (i < timeout);
10089 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10090 		error = 0;
10091 
10092 	return error;
10093 }
10094 
10095 /******************************************************************************
10096  * Reads a byte or word from the NVM using the ICH8 flash access registers.
10097  *
10098  * sc - The pointer to the hw structure
10099  * index - The index of the byte or word to read.
10100  * size - Size of data to read, 1=byte 2=word
10101  * data - Pointer to the word to store the value read.
10102  *****************************************************************************/
10103 static int32_t
10104 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10105     uint32_t size, uint16_t *data)
10106 {
10107 	uint16_t hsfsts;
10108 	uint16_t hsflctl;
10109 	uint32_t flash_linear_address;
10110 	uint32_t flash_data = 0;
10111 	int32_t error = 1;
10112 	int32_t count = 0;
10113 
10114 	if (size < 1 || size > 2 || data == NULL ||
10115 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10116 		return error;
10117 
10118 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10119 	    sc->sc_ich8_flash_base;
10120 
10121 	do {
10122 		delay(1);
10123 		/* Steps */
10124 		error = wm_ich8_cycle_init(sc);
10125 		if (error)
10126 			break;
10127 
10128 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10129 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10130 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10131 		    & HSFCTL_BCOUNT_MASK;
10132 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10133 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10134 
10135 		/*
10136 		 * Write the last 24 bits of index into Flash Linear address
10137 		 * field in Flash Address
10138 		 */
10139 		/* TODO: maybe check the index against the size of the flash */
10140 
10141 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10142 
10143 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10144 
10145 		/*
10146 		 * If FCERR is set, clear it and retry the whole sequence a few
10147 		 * more times; otherwise read the result out of Flash Data0,
10148 		 * least significant byte first.
10149 		 */
10151 		if (error == 0) {
10152 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10153 			if (size == 1)
10154 				*data = (uint8_t)(flash_data & 0x000000FF);
10155 			else if (size == 2)
10156 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10157 			break;
10158 		} else {
10159 			/*
10160 			 * If we've gotten here, then things are probably
10161 			 * completely hosed, but if the error condition is
10162 			 * detected, it won't hurt to give it another try...
10163 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10164 			 */
10165 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10166 			if (hsfsts & HSFSTS_ERR) {
10167 				/* Repeat for some time before giving up. */
10168 				continue;
10169 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10170 				break;
10171 		}
10172 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10173 
10174 	return error;
10175 }
10176 
10177 /******************************************************************************
10178  * Reads a single byte from the NVM using the ICH8 flash access registers.
10179  *
10180  * sc - pointer to wm_hw structure
10181  * index - The index of the byte to read.
10182  * data - Pointer to a byte to store the value read.
10183  *****************************************************************************/
10184 static int32_t
10185 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10186 {
10187 	int32_t status;
10188 	uint16_t word = 0;
10189 
10190 	status = wm_read_ich8_data(sc, index, 1, &word);
10191 	if (status == 0)
10192 		*data = (uint8_t)word;
10193 	else
10194 		*data = 0;
10195 
10196 	return status;
10197 }
10198 
10199 /******************************************************************************
10200  * Reads a word from the NVM using the ICH8 flash access registers.
10201  *
10202  * sc - pointer to wm_hw structure
10203  * index - The starting byte index of the word to read.
10204  * data - Pointer to a word to store the value read.
10205  *****************************************************************************/
10206 static int32_t
10207 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10208 {
10209 	int32_t status;
10210 
10211 	status = wm_read_ich8_data(sc, index, 2, data);
10212 	return status;
10213 }
10214 
10215 /******************************************************************************
10216  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10217  * register.
10218  *
10219  * sc - Struct containing variables accessed by shared code
10220  * offset - offset of word in the EEPROM to read
10221  * data - word read from the EEPROM
10222  * words - number of words to read
10223  *****************************************************************************/
10224 static int
10225 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10226 {
10227 	int32_t  error = 0;
10228 	uint32_t flash_bank = 0;
10229 	uint32_t act_offset = 0;
10230 	uint32_t bank_offset = 0;
10231 	uint16_t word = 0;
10232 	uint16_t i = 0;
10233 
10234 	/*
10235 	 * We need to know which is the valid flash bank.  In the event
10236 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10237 	 * managing flash_bank.  So it cannot be trusted and needs
10238 	 * to be updated with each read.
10239 	 */
10240 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10241 	if (error) {
10242 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10243 			device_xname(sc->sc_dev)));
10244 		flash_bank = 0;
10245 	}
10246 
10247 	/*
10248 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10249 	 * size
10250 	 */
10251 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10252 
10253 	error = wm_get_swfwhw_semaphore(sc);
10254 	if (error) {
10255 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10256 		    __func__);
10257 		return error;
10258 	}
10259 
10260 	for (i = 0; i < words; i++) {
10261 		/* The NVM part needs a byte offset, hence * 2 */
10262 		act_offset = bank_offset + ((offset + i) * 2);
10263 		error = wm_read_ich8_word(sc, act_offset, &word);
10264 		if (error) {
10265 			aprint_error_dev(sc->sc_dev,
10266 			    "%s: failed to read NVM\n", __func__);
10267 			break;
10268 		}
10269 		data[i] = word;
10270 	}
10271 
10272 	wm_put_swfwhw_semaphore(sc);
10273 	return error;
10274 }
10275 
10276 /* iNVM */
10277 
10278 static int
10279 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10280 {
10281 	int32_t  rv = 0;
10282 	uint32_t invm_dword;
10283 	uint16_t i;
10284 	uint8_t record_type, word_address;
10285 
10286 	for (i = 0; i < INVM_SIZE; i++) {
10287 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10288 		/* Get record type */
10289 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10290 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10291 			break;
10292 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10293 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10294 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10295 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10296 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10297 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10298 			if (word_address == address) {
10299 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10300 				rv = 0;
10301 				break;
10302 			}
10303 		}
10304 	}
10305 
10306 	return rv;
10307 }
10308 
10309 static int
10310 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10311 {
10312 	int rv = 0;
10313 	int i;
10314 
10315 	for (i = 0; i < words; i++) {
10316 		switch (offset + i) {
10317 		case NVM_OFF_MACADDR:
10318 		case NVM_OFF_MACADDR1:
10319 		case NVM_OFF_MACADDR2:
10320 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10321 			if (rv != 0) {
10322 				data[i] = 0xffff;
10323 				rv = -1;
10324 			}
10325 			break;
10326 		case NVM_OFF_CFG2:
10327 			rv = wm_nvm_read_word_invm(sc, offset, data);
10328 			if (rv != 0) {
10329 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
10330 				rv = 0;
10331 			}
10332 			break;
10333 		case NVM_OFF_CFG4:
10334 			rv = wm_nvm_read_word_invm(sc, offset, data);
10335 			if (rv != 0) {
10336 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
10337 				rv = 0;
10338 			}
10339 			break;
10340 		case NVM_OFF_LED_1_CFG:
10341 			rv = wm_nvm_read_word_invm(sc, offset, data);
10342 			if (rv != 0) {
10343 				*data = NVM_LED_1_CFG_DEFAULT_I211;
10344 				rv = 0;
10345 			}
10346 			break;
10347 		case NVM_OFF_LED_0_2_CFG:
10348 			rv = wm_nvm_read_word_invm(sc, offset, data);
10349 			if (rv != 0) {
10350 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
10351 				rv = 0;
10352 			}
10353 			break;
10354 		case NVM_OFF_ID_LED_SETTINGS:
10355 			rv = wm_nvm_read_word_invm(sc, offset, data);
10356 			if (rv != 0) {
10357 				*data = ID_LED_RESERVED_FFFF;
10358 				rv = 0;
10359 			}
10360 			break;
10361 		default:
10362 			DPRINTF(WM_DEBUG_NVM,
10363 			    ("NVM word 0x%02x is not mapped.\n", offset));
10364 			*data = NVM_RESERVED_WORD;
10365 			break;
10366 		}
10367 	}
10368 
10369 	return rv;
10370 }
10371 
10372 /* Lock, detecting NVM type, validate checksum, version and read */
10373 
10374 /*
10375  * wm_nvm_acquire:
10376  *
10377  *	Perform the EEPROM handshake required on some chips.
10378  */
10379 static int
10380 wm_nvm_acquire(struct wm_softc *sc)
10381 {
10382 	uint32_t reg;
10383 	int x;
10384 	int ret = 0;
10385 
10386 	/* always success */
10387 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10388 		return 0;
10389 
10390 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10391 		ret = wm_get_swfwhw_semaphore(sc);
10392 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10393 		/* This will also do wm_get_swsm_semaphore() if needed */
10394 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10395 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10396 		ret = wm_get_swsm_semaphore(sc);
10397 	}
10398 
10399 	if (ret) {
10400 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10401 			__func__);
10402 		return 1;
10403 	}
10404 
10405 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10406 		reg = CSR_READ(sc, WMREG_EECD);
10407 
10408 		/* Request EEPROM access. */
10409 		reg |= EECD_EE_REQ;
10410 		CSR_WRITE(sc, WMREG_EECD, reg);
10411 
10412 		/* ..and wait for it to be granted. */
10413 		for (x = 0; x < 1000; x++) {
10414 			reg = CSR_READ(sc, WMREG_EECD);
10415 			if (reg & EECD_EE_GNT)
10416 				break;
10417 			delay(5);
10418 		}
10419 		if ((reg & EECD_EE_GNT) == 0) {
10420 			aprint_error_dev(sc->sc_dev,
10421 			    "could not acquire EEPROM GNT\n");
10422 			reg &= ~EECD_EE_REQ;
10423 			CSR_WRITE(sc, WMREG_EECD, reg);
10424 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10425 				wm_put_swfwhw_semaphore(sc);
10426 			if (sc->sc_flags & WM_F_LOCK_SWFW)
10427 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10428 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
10429 				wm_put_swsm_semaphore(sc);
10430 			return 1;
10431 		}
10432 	}
10433 
10434 	return 0;
10435 }
10436 
10437 /*
10438  * wm_nvm_release:
10439  *
10440  *	Release the EEPROM mutex.
10441  */
10442 static void
10443 wm_nvm_release(struct wm_softc *sc)
10444 {
10445 	uint32_t reg;
10446 
10447 	/* always success */
10448 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10449 		return;
10450 
10451 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10452 		reg = CSR_READ(sc, WMREG_EECD);
10453 		reg &= ~EECD_EE_REQ;
10454 		CSR_WRITE(sc, WMREG_EECD, reg);
10455 	}
10456 
10457 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10458 		wm_put_swfwhw_semaphore(sc);
10459 	if (sc->sc_flags & WM_F_LOCK_SWFW)
10460 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10461 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
10462 		wm_put_swsm_semaphore(sc);
10463 }
10464 
10465 static int
10466 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10467 {
10468 	uint32_t eecd = 0;
10469 
10470 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10471 	    || sc->sc_type == WM_T_82583) {
10472 		eecd = CSR_READ(sc, WMREG_EECD);
10473 
10474 		/* Isolate bits 15 & 16 */
10475 		eecd = ((eecd >> 15) & 0x03);
10476 
10477 		/* If both bits are set, device is Flash type */
10478 		if (eecd == 0x03)
10479 			return 0;
10480 	}
10481 	return 1;
10482 }
10483 
10484 static int
10485 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10486 {
10487 	uint32_t eec;
10488 
10489 	eec = CSR_READ(sc, WMREG_EEC);
10490 	if ((eec & EEC_FLASH_DETECTED) != 0)
10491 		return 1;
10492 
10493 	return 0;
10494 }
10495 
10496 /*
10497  * wm_nvm_validate_checksum
10498  *
10499  * The checksum is defined as the sum of the first 64 (16 bit) words.
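 * That sum, modulo 2^16, is expected to equal NVM_CHECKSUM (0xBABA on
 * e1000-family parts).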
10500  */
10501 static int
10502 wm_nvm_validate_checksum(struct wm_softc *sc)
10503 {
10504 	uint16_t checksum;
10505 	uint16_t eeprom_data;
10506 #ifdef WM_DEBUG
10507 	uint16_t csum_wordaddr, valid_checksum;
10508 #endif
10509 	int i;
10510 
10511 	checksum = 0;
10512 
10513 	/* Don't check for I211 */
10514 	if (sc->sc_type == WM_T_I211)
10515 		return 0;
10516 
10517 #ifdef WM_DEBUG
10518 	if (sc->sc_type == WM_T_PCH_LPT) {
10519 		csum_wordaddr = NVM_OFF_COMPAT;
10520 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10521 	} else {
10522 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10523 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10524 	}
10525 
10526 	/* Dump EEPROM image for debug */
10527 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10528 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10529 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10530 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10531 		if ((eeprom_data & valid_checksum) == 0) {
10532 			DPRINTF(WM_DEBUG_NVM,
10533 			    ("%s: NVM need to be updated (%04x != %04x)\n",
10534 				device_xname(sc->sc_dev), eeprom_data,
10535 				    valid_checksum));
10536 		}
10537 	}
10538 
10539 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
10540 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10541 		for (i = 0; i < NVM_SIZE; i++) {
10542 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
10543 				printf("XXXX ");
10544 			else
10545 				printf("%04hx ", eeprom_data);
10546 			if (i % 8 == 7)
10547 				printf("\n");
10548 		}
10549 	}
10550 
10551 #endif /* WM_DEBUG */
10552 
10553 	for (i = 0; i < NVM_SIZE; i++) {
10554 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
10555 			return 1;
10556 		checksum += eeprom_data;
10557 	}
10558 
10559 	if (checksum != (uint16_t) NVM_CHECKSUM) {
10560 #ifdef WM_DEBUG
10561 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10562 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10563 #endif
10564 	}
10565 
10566 	return 0;
10567 }
10568 
10569 static void
10570 wm_nvm_version_invm(struct wm_softc *sc)
10571 {
10572 	uint32_t dword;
10573 
10574 	/*
10575 	 * Linux's code to decode the version is very strange, so we don't
10576 	 * follow that algorithm and instead just use word 61 as the
10577 	 * documentation describes.  Perhaps it's not perfect, though...
10578 	 *
10579 	 * Example:
10580 	 *
10581 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10582 	 */
10583 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10584 	dword = __SHIFTOUT(dword, INVM_VER_1);
10585 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10586 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10587 }
10588 
10589 static void
10590 wm_nvm_version(struct wm_softc *sc)
10591 {
10592 	uint16_t major, minor, build, patch;
10593 	uint16_t uid0, uid1;
10594 	uint16_t nvm_data;
10595 	uint16_t off;
10596 	bool check_version = false;
10597 	bool check_optionrom = false;
10598 	bool have_build = false;
10599 
10600 	/*
10601 	 * Version format:
10602 	 *
10603 	 * XYYZ
10604 	 * X0YZ
10605 	 * X0YY
10606 	 *
10607 	 * Example:
10608 	 *
10609 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
10610 	 *	82571	0x50a6	5.10.6?
10611 	 *	82572	0x506a	5.6.10?
10612 	 *	82572EI	0x5069	5.6.9?
10613 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
10614 	 *		0x2013	2.1.3?
10615 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
10616 	 */
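	/*
	 * Worked example of the decoding below (a sketch, assuming the
	 * 82571-style XYYZ layout and the usual mask definitions):
	 * nvm_data == 0x50a2 gives major 5, minor 0x0a -> 10 after the
	 * decimal fixup, and build 2, i.e. version "5.10.2".
	 */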
10617 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10618 	switch (sc->sc_type) {
10619 	case WM_T_82571:
10620 	case WM_T_82572:
10621 	case WM_T_82574:
10622 	case WM_T_82583:
10623 		check_version = true;
10624 		check_optionrom = true;
10625 		have_build = true;
10626 		break;
10627 	case WM_T_82575:
10628 	case WM_T_82576:
10629 	case WM_T_82580:
10630 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10631 			check_version = true;
10632 		break;
10633 	case WM_T_I211:
10634 		wm_nvm_version_invm(sc);
10635 		goto printver;
10636 	case WM_T_I210:
10637 		if (!wm_nvm_get_flash_presence_i210(sc)) {
10638 			wm_nvm_version_invm(sc);
10639 			goto printver;
10640 		}
10641 		/* FALLTHROUGH */
10642 	case WM_T_I350:
10643 	case WM_T_I354:
10644 		check_version = true;
10645 		check_optionrom = true;
10646 		break;
10647 	default:
10648 		return;
10649 	}
10650 	if (check_version) {
10651 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10652 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10653 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10654 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10655 			build = nvm_data & NVM_BUILD_MASK;
10656 			have_build = true;
10657 		} else
10658 			minor = nvm_data & 0x00ff;
10659 
10660 		/* Decimal */
10661 		minor = (minor / 16) * 10 + (minor % 16);
10662 		sc->sc_nvm_ver_major = major;
10663 		sc->sc_nvm_ver_minor = minor;
10664 
10665 printver:
10666 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10667 		    sc->sc_nvm_ver_minor);
10668 		if (have_build) {
10669 			sc->sc_nvm_ver_build = build;
10670 			aprint_verbose(".%d", build);
10671 		}
10672 	}
10673 	if (check_optionrom) {
10674 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10675 		/* Option ROM Version */
10676 		if ((off != 0x0000) && (off != 0xffff)) {
10677 			off += NVM_COMBO_VER_OFF;
10678 			wm_nvm_read(sc, off + 1, 1, &uid1);
10679 			wm_nvm_read(sc, off, 1, &uid0);
10680 			if ((uid0 != 0) && (uid0 != 0xffff)
10681 			    && (uid1 != 0) && (uid1 != 0xffff)) {
10682 				/* 16bits */
10683 				major = uid0 >> 8;
10684 				build = (uid0 << 8) | (uid1 >> 8);
10685 				patch = uid1 & 0x00ff;
10686 				aprint_verbose(", option ROM Version %d.%d.%d",
10687 				    major, build, patch);
10688 			}
10689 		}
10690 	}
10691 
10692 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10693 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10694 }
10695 
10696 /*
10697  * wm_nvm_read:
10698  *
10699  *	Read data from the serial EEPROM.
10700  */
10701 static int
10702 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10703 {
10704 	int rv;
10705 
10706 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
10707 		return 1;
10708 
10709 	if (wm_nvm_acquire(sc))
10710 		return 1;
10711 
10712 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10713 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10714 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10715 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10716 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
10717 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10718 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10719 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10720 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
10721 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10722 	else
10723 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10724 
10725 	wm_nvm_release(sc);
10726 	return rv;
10727 }
10728 
10729 /*
10730  * Hardware semaphores.
10731  * Very complex...
10732  */
10733 
10734 static int
10735 wm_get_swsm_semaphore(struct wm_softc *sc)
10736 {
10737 	int32_t timeout;
10738 	uint32_t swsm;
10739 
10740 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
10741 		/* Get the SW semaphore. */
10742 		timeout = sc->sc_nvm_wordsize + 1;
10743 		while (timeout) {
10744 			swsm = CSR_READ(sc, WMREG_SWSM);
10745 
10746 			if ((swsm & SWSM_SMBI) == 0)
10747 				break;
10748 
10749 			delay(50);
10750 			timeout--;
10751 		}
10752 
10753 		if (timeout == 0) {
10754 			aprint_error_dev(sc->sc_dev,
10755 			    "could not acquire SWSM SMBI\n");
10756 			return 1;
10757 		}
10758 	}
10759 
10760 	/* Get the FW semaphore. */
10761 	timeout = sc->sc_nvm_wordsize + 1;
10762 	while (timeout) {
10763 		swsm = CSR_READ(sc, WMREG_SWSM);
10764 		swsm |= SWSM_SWESMBI;
10765 		CSR_WRITE(sc, WMREG_SWSM, swsm);
10766 		/* If we managed to set the bit we got the semaphore. */
10767 		swsm = CSR_READ(sc, WMREG_SWSM);
10768 		if (swsm & SWSM_SWESMBI)
10769 			break;
10770 
10771 		delay(50);
10772 		timeout--;
10773 	}
10774 
10775 	if (timeout == 0) {
10776 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
10777 		/* Release semaphores */
10778 		wm_put_swsm_semaphore(sc);
10779 		return 1;
10780 	}
10781 	return 0;
10782 }
10783 
10784 static void
10785 wm_put_swsm_semaphore(struct wm_softc *sc)
10786 {
10787 	uint32_t swsm;
10788 
10789 	swsm = CSR_READ(sc, WMREG_SWSM);
10790 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10791 	CSR_WRITE(sc, WMREG_SWSM, swsm);
10792 }
10793 
10794 static int
10795 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10796 {
10797 	uint32_t swfw_sync;
10798 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10799 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10800 	int timeout;
10801 
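	/*
	 * A sketch of the protocol below: the software-owned bits live at
	 * mask << SWFW_SOFT_SHIFT and the firmware-owned bits at
	 * mask << SWFW_FIRM_SHIFT in SW_FW_SYNC; the resource is free only
	 * when both are clear, and we claim it by setting the SW bit.
	 */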
10802 	for (timeout = 0; timeout < 200; timeout++) {
10803 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
10804 			if (wm_get_swsm_semaphore(sc)) {
10805 				aprint_error_dev(sc->sc_dev,
10806 				    "%s: failed to get semaphore\n",
10807 				    __func__);
10808 				return 1;
10809 			}
10810 		}
10811 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10812 		if ((swfw_sync & (swmask | fwmask)) == 0) {
10813 			swfw_sync |= swmask;
10814 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10815 			if (sc->sc_flags & WM_F_LOCK_SWSM)
10816 				wm_put_swsm_semaphore(sc);
10817 			return 0;
10818 		}
10819 		if (sc->sc_flags & WM_F_LOCK_SWSM)
10820 			wm_put_swsm_semaphore(sc);
10821 		delay(5000);
10822 	}
10823 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10824 	    device_xname(sc->sc_dev), mask, swfw_sync);
10825 	return 1;
10826 }
10827 
10828 static void
10829 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10830 {
10831 	uint32_t swfw_sync;
10832 
10833 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
10834 		while (wm_get_swsm_semaphore(sc) != 0)
10835 			continue;
10836 	}
10837 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10838 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10839 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10840 	if (sc->sc_flags & WM_F_LOCK_SWSM)
10841 		wm_put_swsm_semaphore(sc);
10842 }
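
/*
 * Note on SW_FW_SYNC: the register holds two copies of the resource
 * mask, a software one (mask << SWFW_SOFT_SHIFT) and a firmware one
 * (mask << SWFW_FIRM_SHIFT).  A resource is free only when both bits
 * are clear, which is the (swfw_sync & (swmask | fwmask)) == 0 test
 * in wm_get_swfw_semaphore() above.
 */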
10843 
10844 static int
10845 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10846 {
10847 	uint32_t ext_ctrl;
	int timeout;
10849 
10850 	for (timeout = 0; timeout < 200; timeout++) {
10851 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10852 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10853 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10854 
10855 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10856 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10857 			return 0;
10858 		delay(5000);
10859 	}
10860 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10861 	    device_xname(sc->sc_dev), ext_ctrl);
10862 	return 1;
10863 }
10864 
10865 static void
10866 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10867 {
10868 	uint32_t ext_ctrl;
10869 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10870 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10871 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10872 }
10873 
10874 static int
10875 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10876 {
10877 	int i = 0;
10878 	uint32_t reg;
10879 
10880 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10881 	do {
10882 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
10883 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10884 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10885 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10886 			break;
10887 		delay(2*1000);
10888 		i++;
10889 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10890 
10891 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10892 		wm_put_hw_semaphore_82573(sc);
10893 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
10894 		    device_xname(sc->sc_dev));
10895 		return -1;
10896 	}
10897 
10898 	return 0;
10899 }
10900 
10901 static void
10902 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10903 {
10904 	uint32_t reg;
10905 
10906 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10907 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10908 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10909 }
10910 
10911 /*
10912  * Management mode and power management related subroutines.
10913  * BMC, AMT, suspend/resume and EEE.
10914  */
10915 
10916 static int
10917 wm_check_mng_mode(struct wm_softc *sc)
10918 {
10919 	int rv;
10920 
10921 	switch (sc->sc_type) {
10922 	case WM_T_ICH8:
10923 	case WM_T_ICH9:
10924 	case WM_T_ICH10:
10925 	case WM_T_PCH:
10926 	case WM_T_PCH2:
10927 	case WM_T_PCH_LPT:
10928 		rv = wm_check_mng_mode_ich8lan(sc);
10929 		break;
10930 	case WM_T_82574:
10931 	case WM_T_82583:
10932 		rv = wm_check_mng_mode_82574(sc);
10933 		break;
10934 	case WM_T_82571:
10935 	case WM_T_82572:
10936 	case WM_T_82573:
10937 	case WM_T_80003:
10938 		rv = wm_check_mng_mode_generic(sc);
10939 		break;
10940 	default:
		/* nothing to do */
10942 		rv = 0;
10943 		break;
10944 	}
10945 
10946 	return rv;
10947 }
10948 
10949 static int
10950 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10951 {
10952 	uint32_t fwsm;
10953 
10954 	fwsm = CSR_READ(sc, WMREG_FWSM);
10955 
10956 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
10957 		return 1;
10958 
10959 	return 0;
10960 }
10961 
10962 static int
10963 wm_check_mng_mode_82574(struct wm_softc *sc)
10964 {
10965 	uint16_t data;
10966 
10967 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
10968 
10969 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
10970 		return 1;
10971 
10972 	return 0;
10973 }
10974 
10975 static int
10976 wm_check_mng_mode_generic(struct wm_softc *sc)
10977 {
10978 	uint32_t fwsm;
10979 
10980 	fwsm = CSR_READ(sc, WMREG_FWSM);
10981 
10982 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
10983 		return 1;
10984 
10985 	return 0;
10986 }
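
/*
 * Note on the FWSM checks above: FWSM_MODE is a multi-bit field, so
 * the code compares the whole masked field against the expected mode
 * shifted into position rather than testing a single bit, e.g.:
 *
 *	(fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT)
 */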
10987 
10988 static int
10989 wm_enable_mng_pass_thru(struct wm_softc *sc)
10990 {
10991 	uint32_t manc, fwsm, factps;
10992 
10993 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
10994 		return 0;
10995 
10996 	manc = CSR_READ(sc, WMREG_MANC);
10997 
10998 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
10999 		device_xname(sc->sc_dev), manc));
11000 	if ((manc & MANC_RECV_TCO_EN) == 0)
11001 		return 0;
11002 
11003 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11004 		fwsm = CSR_READ(sc, WMREG_FWSM);
11005 		factps = CSR_READ(sc, WMREG_FACTPS);
11006 		if (((factps & FACTPS_MNGCG) == 0)
11007 		    && ((fwsm & FWSM_MODE_MASK)
11008 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
11009 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11011 		uint16_t data;
11012 
11013 		factps = CSR_READ(sc, WMREG_FACTPS);
11014 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11015 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11016 			device_xname(sc->sc_dev), factps, data));
11017 		if (((factps & FACTPS_MNGCG) == 0)
11018 		    && ((data & NVM_CFG2_MNGM_MASK)
11019 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11020 			return 1;
11021 	} else if (((manc & MANC_SMBUS_EN) != 0)
11022 	    && ((manc & MANC_ASF_EN) == 0))
11023 		return 1;
11024 
11025 	return 0;
11026 }
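
/*
 * Summary of the checks above: management pass-through is reported
 * only when the firmware receives TCO traffic (MANC_RECV_TCO_EN) and
 * either FWSM/NVM says the firmware is in IAMT or pass-through mode,
 * or, on legacy parts, SMBus is enabled while ASF is not.
 */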
11027 
11028 static int
11029 wm_check_reset_block(struct wm_softc *sc)
11030 {
11031 	uint32_t reg;
11032 
11033 	switch (sc->sc_type) {
11034 	case WM_T_ICH8:
11035 	case WM_T_ICH9:
11036 	case WM_T_ICH10:
11037 	case WM_T_PCH:
11038 	case WM_T_PCH2:
11039 	case WM_T_PCH_LPT:
11040 		reg = CSR_READ(sc, WMREG_FWSM);
11041 		if ((reg & FWSM_RSPCIPHY) != 0)
11042 			return 0;
11043 		else
11044 			return -1;
11045 		break;
11046 	case WM_T_82571:
11047 	case WM_T_82572:
11048 	case WM_T_82573:
11049 	case WM_T_82574:
11050 	case WM_T_82583:
11051 	case WM_T_80003:
11052 		reg = CSR_READ(sc, WMREG_MANC);
11053 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11054 			return -1;
11055 		else
11056 			return 0;
11057 		break;
11058 	default:
11059 		/* no problem */
11060 		break;
11061 	}
11062 
11063 	return 0;
11064 }
11065 
11066 static void
11067 wm_get_hw_control(struct wm_softc *sc)
11068 {
11069 	uint32_t reg;
11070 
11071 	switch (sc->sc_type) {
11072 	case WM_T_82573:
11073 		reg = CSR_READ(sc, WMREG_SWSM);
11074 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11075 		break;
11076 	case WM_T_82571:
11077 	case WM_T_82572:
11078 	case WM_T_82574:
11079 	case WM_T_82583:
11080 	case WM_T_80003:
11081 	case WM_T_ICH8:
11082 	case WM_T_ICH9:
11083 	case WM_T_ICH10:
11084 	case WM_T_PCH:
11085 	case WM_T_PCH2:
11086 	case WM_T_PCH_LPT:
11087 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11088 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11089 		break;
11090 	default:
11091 		break;
11092 	}
11093 }
11094 
11095 static void
11096 wm_release_hw_control(struct wm_softc *sc)
11097 {
11098 	uint32_t reg;
11099 
11100 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11101 		return;
11102 
11103 	if (sc->sc_type == WM_T_82573) {
11104 		reg = CSR_READ(sc, WMREG_SWSM);
11105 		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
11107 	} else {
11108 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11109 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11110 	}
11111 }
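
/*
 * Note on DRV_LOAD: this is the driver/firmware handshake bit.
 * Setting it (wm_get_hw_control()) tells the management firmware that
 * the host driver owns the hardware; clearing it
 * (wm_release_hw_control()) hands ownership back so that BMC/AMT
 * firmware can take over.  The 82573 keeps the bit in SWSM, the other
 * chips in CTRL_EXT.
 */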
11112 
11113 static void
11114 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
11115 {
11116 	uint32_t reg;
11117 
11118 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11119 
11120 	if (on != 0)
11121 		reg |= EXTCNFCTR_GATE_PHY_CFG;
11122 	else
11123 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11124 
11125 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11126 }
11127 
11128 static void
11129 wm_smbustopci(struct wm_softc *sc)
11130 {
11131 	uint32_t fwsm;
11132 
11133 	fwsm = CSR_READ(sc, WMREG_FWSM);
	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
11136 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11137 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11138 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11139 		CSR_WRITE_FLUSH(sc);
11140 		delay(10);
11141 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11142 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11143 		CSR_WRITE_FLUSH(sc);
11144 		delay(50*1000);
11145 
11146 		/*
11147 		 * Gate automatic PHY configuration by hardware on non-managed
11148 		 * 82579
11149 		 */
11150 		if (sc->sc_type == WM_T_PCH2)
11151 			wm_gate_hw_phy_config_ich8lan(sc, 1);
11152 	}
11153 }
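
/*
 * Note on the sequence above: pulsing CTRL_LANPHYPC_OVERRIDE with
 * CTRL_LANPHYPC_VALUE cleared drives the LANPHYPC pin low, which
 * switches the PHY from SMBus mode (used by management firmware) back
 * to PCIe mode so the host driver can reach it.  This is presumably
 * only safe when no valid firmware is present and resets are not
 * blocked, hence the FWSM_FW_VALID and wm_check_reset_block() tests.
 */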
11154 
11155 static void
11156 wm_init_manageability(struct wm_softc *sc)
11157 {
11158 
11159 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11160 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11161 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11162 
11163 		/* Disable hardware interception of ARP */
11164 		manc &= ~MANC_ARP_EN;
11165 
11166 		/* Enable receiving management packets to the host */
11167 		if (sc->sc_type >= WM_T_82571) {
11168 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11170 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11171 		}
11172 
11173 		CSR_WRITE(sc, WMREG_MANC, manc);
11174 	}
11175 }
11176 
11177 static void
11178 wm_release_manageability(struct wm_softc *sc)
11179 {
11180 
11181 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11182 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11183 
11184 		manc |= MANC_ARP_EN;
11185 		if (sc->sc_type >= WM_T_82571)
11186 			manc &= ~MANC_EN_MNG2HOST;
11187 
11188 		CSR_WRITE(sc, WMREG_MANC, manc);
11189 	}
11190 }
11191 
11192 static void
11193 wm_get_wakeup(struct wm_softc *sc)
11194 {
11195 
11196 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11197 	switch (sc->sc_type) {
11198 	case WM_T_82573:
11199 	case WM_T_82583:
11200 		sc->sc_flags |= WM_F_HAS_AMT;
11201 		/* FALLTHROUGH */
11202 	case WM_T_80003:
11203 	case WM_T_82541:
11204 	case WM_T_82547:
11205 	case WM_T_82571:
11206 	case WM_T_82572:
11207 	case WM_T_82574:
11208 	case WM_T_82575:
11209 	case WM_T_82576:
11210 	case WM_T_82580:
11211 	case WM_T_I350:
11212 	case WM_T_I354:
11213 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
11214 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11215 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11216 		break;
11217 	case WM_T_ICH8:
11218 	case WM_T_ICH9:
11219 	case WM_T_ICH10:
11220 	case WM_T_PCH:
11221 	case WM_T_PCH2:
11222 	case WM_T_PCH_LPT:
11223 		sc->sc_flags |= WM_F_HAS_AMT;
11224 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11225 		break;
11226 	default:
11227 		break;
11228 	}
11229 
11230 	/* 1: HAS_MANAGE */
11231 	if (wm_enable_mng_pass_thru(sc) != 0)
11232 		sc->sc_flags |= WM_F_HAS_MANAGE;
11233 
11234 #ifdef WM_DEBUG
11235 	printf("\n");
11236 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11237 		printf("HAS_AMT,");
11238 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11239 		printf("ARC_SUBSYS_VALID,");
11240 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11241 		printf("ASF_FIRMWARE_PRES,");
11242 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11243 		printf("HAS_MANAGE,");
11244 	printf("\n");
11245 #endif
	/*
	 * Note that the WOL flags are set after the EEPROM settings
	 * have been reset.
	 */
11250 }
11251 
11252 #ifdef WM_WOL
11253 /* WOL in the newer chipset interfaces (pchlan) */
11254 static void
11255 wm_enable_phy_wakeup(struct wm_softc *sc)
11256 {
11257 #if 0
11258 	uint16_t preg;
11259 
11260 	/* Copy MAC RARs to PHY RARs */
11261 
11262 	/* Copy MAC MTA to PHY MTA */
11263 
11264 	/* Configure PHY Rx Control register */
11265 
11266 	/* Enable PHY wakeup in MAC register */
11267 
11268 	/* Configure and enable PHY wakeup in PHY registers */
11269 
11270 	/* Activate PHY wakeup */
11271 
11272 	/* XXX */
11273 #endif
11274 }
11275 
11276 /* Power down workaround on D3 */
11277 static void
11278 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11279 {
11280 	uint32_t reg;
11281 	int i;
11282 
11283 	for (i = 0; i < 2; i++) {
11284 		/* Disable link */
11285 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11286 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11287 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11288 
11289 		/*
11290 		 * Call gig speed drop workaround on Gig disable before
11291 		 * accessing any PHY registers
11292 		 */
11293 		if (sc->sc_type == WM_T_ICH8)
11294 			wm_gig_downshift_workaround_ich8lan(sc);
11295 
11296 		/* Write VR power-down enable */
11297 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11298 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11299 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11300 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11301 
11302 		/* Read it back and test */
11303 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11304 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11305 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11306 			break;
11307 
11308 		/* Issue PHY reset and repeat at most one more time */
11309 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11310 	}
11311 }
11312 
11313 static void
11314 wm_enable_wakeup(struct wm_softc *sc)
11315 {
11316 	uint32_t reg, pmreg;
11317 	pcireg_t pmode;
11318 
11319 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11320 		&pmreg, NULL) == 0)
11321 		return;
11322 
11323 	/* Advertise the wakeup capability */
11324 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11325 	    | CTRL_SWDPIN(3));
11326 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11327 
11328 	/* ICH workaround */
11329 	switch (sc->sc_type) {
11330 	case WM_T_ICH8:
11331 	case WM_T_ICH9:
11332 	case WM_T_ICH10:
11333 	case WM_T_PCH:
11334 	case WM_T_PCH2:
11335 	case WM_T_PCH_LPT:
11336 		/* Disable gig during WOL */
11337 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11338 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11339 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11340 		if (sc->sc_type == WM_T_PCH)
11341 			wm_gmii_reset(sc);
11342 
11343 		/* Power down workaround */
11344 		if (sc->sc_phytype == WMPHY_82577) {
11345 			struct mii_softc *child;
11346 
11347 			/* Assume that the PHY is copper */
			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* magic num */
11352 		}
11353 		break;
11354 	default:
11355 		break;
11356 	}
11357 
11358 	/* Keep the laser running on fiber adapters */
11359 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11360 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11361 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11362 		reg |= CTRL_EXT_SWDPIN(3);
11363 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11364 	}
11365 
11366 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11367 #if 0	/* for the multicast packet */
11368 	reg |= WUFC_MC;
11369 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11370 #endif
11371 
11372 	if (sc->sc_type == WM_T_PCH) {
11373 		wm_enable_phy_wakeup(sc);
11374 	} else {
11375 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11376 		CSR_WRITE(sc, WMREG_WUFC, reg);
11377 	}
11378 
11379 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11380 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11381 		|| (sc->sc_type == WM_T_PCH2))
11382 		    && (sc->sc_phytype == WMPHY_IGP_3))
11383 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11384 
11385 	/* Request PME */
11386 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11387 #if 0
11388 	/* Disable WOL */
11389 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11390 #else
11391 	/* For WOL */
11392 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11393 #endif
11394 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11395 }
11396 #endif /* WM_WOL */
11397 
11398 /* EEE */
11399 
11400 static void
11401 wm_set_eee_i350(struct wm_softc *sc)
11402 {
11403 	uint32_t ipcnfg, eeer;
11404 
11405 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11406 	eeer = CSR_READ(sc, WMREG_EEER);
11407 
11408 	if ((sc->sc_flags & WM_F_EEE) != 0) {
11409 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11410 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11411 		    | EEER_LPI_FC);
11412 	} else {
11413 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11414 		ipcnfg &= ~IPCNFG_10BASE_TE;
11415 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11416 		    | EEER_LPI_FC);
11417 	}
11418 
11419 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11420 	CSR_WRITE(sc, WMREG_EEER, eeer);
11421 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11422 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11423 }
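
/*
 * Note on the trailing CSR_READs above: reading any CSR forces the
 * preceding posted PCI writes to complete, so they act as write
 * flushes.  An equivalent, more explicit form (CSR_WRITE_FLUSH() is
 * used elsewhere in this file for the same purpose) would be:
 *
 *	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
 *	CSR_WRITE(sc, WMREG_EEER, eeer);
 *	CSR_WRITE_FLUSH(sc);
 */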
11424 
/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds belong in the PHY drivers; the ones below need
 * MAC-side register access as well, so they live here.
 */
11429 
11430 /* Work-around for 82566 Kumeran PCS lock loss */
11431 static void
11432 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11433 {
11434 	int miistatus, active, i;
11435 	int reg;
11436 
11437 	miistatus = sc->sc_mii.mii_media_status;
11438 
11439 	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
11441 		return;
11442 
11443 	active = sc->sc_mii.mii_media_active;
11444 
	/* Nothing to do if the link speed is anything other than 1Gbps */
11446 	if (IFM_SUBTYPE(active) != IFM_1000_T)
11447 		return;
11448 
11449 	for (i = 0; i < 10; i++) {
11450 		/* read twice */
11451 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11452 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11454 			goto out;	/* GOOD! */
11455 
11456 		/* Reset the PHY */
11457 		wm_gmii_reset(sc);
11458 		delay(5*1000);
11459 	}
11460 
11461 	/* Disable GigE link negotiation */
11462 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11463 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11464 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11465 
11466 	/*
11467 	 * Call gig speed drop workaround on Gig disable before accessing
11468 	 * any PHY registers.
11469 	 */
11470 	wm_gig_downshift_workaround_ich8lan(sc);
11471 
11472 out:
11473 	return;
11474 }
11475 
11476 /* WOL from S5 stops working */
11477 static void
11478 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11479 {
11480 	uint16_t kmrn_reg;
11481 
11482 	/* Only for igp3 */
11483 	if (sc->sc_phytype == WMPHY_IGP_3) {
11484 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11485 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11486 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11487 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11488 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11489 	}
11490 }
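
/*
 * Note on the sequence above: briefly enabling and then disabling the
 * Kumeran near-end loopback (KUMCTRLSTA_DIAG_NELPBK) kicks the IGP3
 * PHY out of the state that breaks WOL from S5 after a gig downshift.
 */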
11491 
/*
 * Workarounds for PCH PHYs
 * XXX should these be moved to a new PHY driver?
 */
11496 static void
11497 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11498 {
11499 	if (sc->sc_phytype == WMPHY_82577)
11500 		wm_set_mdio_slow_mode_hv(sc);
11501 
11502 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11503 
11504 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11505 
11506 	/* 82578 */
11507 	if (sc->sc_phytype == WMPHY_82578) {
11508 		/* PCH rev. < 3 */
11509 		if (sc->sc_rev < 3) {
11510 			/* XXX 6 bit shift? Why? Is it page2? */
11511 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11512 			    0x66c0);
11513 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11514 			    0xffff);
11515 		}
11516 
11517 		/* XXX phy rev. < 2 */
11518 	}
11519 
11520 	/* Select page 0 */
11521 
11522 	/* XXX acquire semaphore */
11523 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11524 	/* XXX release semaphore */
11525 
11526 	/*
11527 	 * Configure the K1 Si workaround during phy reset assuming there is
11528 	 * link so that it disables K1 if link is in 1Gbps.
11529 	 */
11530 	wm_k1_gig_workaround_hv(sc, 1);
11531 }
11532 
11533 static void
11534 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11535 {
11536 
11537 	wm_set_mdio_slow_mode_hv(sc);
11538 }
11539 
11540 static void
11541 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11542 {
11543 	int k1_enable = sc->sc_nvm_k1_enabled;
11544 
11545 	/* XXX acquire semaphore */
11546 
11547 	if (link) {
11548 		k1_enable = 0;
11549 
11550 		/* Link stall fix for link up */
11551 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11552 	} else {
11553 		/* Link stall fix for link down */
11554 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11555 	}
11556 
11557 	wm_configure_k1_ich8lan(sc, k1_enable);
11558 
11559 	/* XXX release semaphore */
11560 }
11561 
11562 static void
11563 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11564 {
11565 	uint32_t reg;
11566 
11567 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11568 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11569 	    reg | HV_KMRN_MDIO_SLOW);
11570 }
11571 
11572 static void
11573 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11574 {
11575 	uint32_t ctrl, ctrl_ext, tmp;
11576 	uint16_t kmrn_reg;
11577 
11578 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11579 
11580 	if (k1_enable)
11581 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11582 	else
11583 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11584 
11585 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11586 
11587 	delay(20);
11588 
11589 	ctrl = CSR_READ(sc, WMREG_CTRL);
11590 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11591 
11592 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11593 	tmp |= CTRL_FRCSPD;
11594 
11595 	CSR_WRITE(sc, WMREG_CTRL, tmp);
11596 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11597 	CSR_WRITE_FLUSH(sc);
11598 	delay(20);
11599 
11600 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
11601 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11602 	CSR_WRITE_FLUSH(sc);
11603 	delay(20);
11604 }
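
/*
 * Note on wm_configure_k1_ich8lan(): while the K1 setting latches, the
 * MAC is briefly forced to a fixed speed (CTRL_FRCSPD in CTRL plus
 * CTRL_EXT_SPD_BYPS in CTRL_EXT) so the link does not renegotiate
 * mid-update; the saved CTRL and CTRL_EXT values are then restored.
 */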
11605 
/* Special case - the 82575 needs to be initialized manually ... */
11607 static void
11608 wm_reset_init_script_82575(struct wm_softc *sc)
11609 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as the one mentioned in the FreeBSD driver
	 * for the i82575.
	 */
11614 
11615 	/* SerDes configuration via SERDESCTRL */
11616 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11617 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11618 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11619 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11620 
11621 	/* CCM configuration via CCMCTL register */
11622 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11623 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11624 
11625 	/* PCIe lanes configuration */
11626 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11627 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11628 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11629 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11630 
11631 	/* PCIe PLL Configuration */
11632 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11633 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11634 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
11635 }
11636 
11637 static void
11638 wm_reset_mdicnfg_82580(struct wm_softc *sc)
11639 {
11640 	uint32_t reg;
11641 	uint16_t nvmword;
11642 	int rv;
11643 
11644 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11645 		return;
11646 
11647 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
11648 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
11649 	if (rv != 0) {
11650 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
11651 		    __func__);
11652 		return;
11653 	}
11654 
11655 	reg = CSR_READ(sc, WMREG_MDICNFG);
11656 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
11657 		reg |= MDICNFG_DEST;
11658 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
11659 		reg |= MDICNFG_COM_MDIO;
11660 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11661 }
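
/*
 * Note on wm_reset_mdicnfg_82580(): the MDICNFG destination and shared
 * MDIO bits are normally autoloaded from the CFG3 word of the port's
 * NVM section; this routine re-applies them from the NVM after a reset
 * for SGMII setups, where the PHY may sit behind an external MDIO bus.
 */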
11662 
11663 /*
11664  * I210 Errata 25 and I211 Errata 10
11665  * Slow System Clock.
11666  */
11667 static void
11668 wm_pll_workaround_i210(struct wm_softc *sc)
11669 {
11670 	uint32_t mdicnfg, wuc;
11671 	uint32_t reg;
11672 	pcireg_t pcireg;
11673 	uint32_t pmreg;
11674 	uint16_t nvmword, tmp_nvmword;
11675 	int phyval;
11676 	bool wa_done = false;
11677 	int i;
11678 
11679 	/* Save WUC and MDICNFG registers */
11680 	wuc = CSR_READ(sc, WMREG_WUC);
11681 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
11682 
11683 	reg = mdicnfg & ~MDICNFG_DEST;
11684 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11685 
11686 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
11687 		nvmword = INVM_DEFAULT_AL;
11688 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
11689 
11690 	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0) {
		/* Restore the MDICNFG setting modified above */
		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
		return;
	}
11694 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
11695 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
11696 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
11697 
11698 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
11699 			break; /* OK */
11700 		}
11701 
11702 		wa_done = true;
11703 		/* Directly reset the internal PHY */
11704 		reg = CSR_READ(sc, WMREG_CTRL);
11705 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
11706 
11707 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11708 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
11709 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11710 
11711 		CSR_WRITE(sc, WMREG_WUC, 0);
11712 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
11713 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11714 
11715 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
11716 		    pmreg + PCI_PMCSR);
11717 		pcireg |= PCI_PMCSR_STATE_D3;
11718 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11719 		    pmreg + PCI_PMCSR, pcireg);
11720 		delay(1000);
11721 		pcireg &= ~PCI_PMCSR_STATE_D3;
11722 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11723 		    pmreg + PCI_PMCSR, pcireg);
11724 
11725 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
11726 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11727 
11728 		/* Restore WUC register */
11729 		CSR_WRITE(sc, WMREG_WUC, wuc);
11730 	}
11731 
11732 	/* Restore MDICNFG setting */
11733 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
11734 	if (wa_done)
11735 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
11736 }
11737