xref: /netbsd-src/sys/dev/pci/if_wm.c (revision 53b02e147d4ed531c0d2a5ca9b3e8026ba3e99b5)
1 /*	$NetBSD: if_wm.c,v 1.725 2021/12/23 17:05:49 hannken Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficient Ethernet) for I354
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  */
83 
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.725 2021/12/23 17:05:49 hannken Exp $");
86 
87 #ifdef _KERNEL_OPT
88 #include "opt_net_mpsafe.h"
89 #include "opt_if_wm.h"
90 #endif
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kmem.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/queue.h>
104 #include <sys/syslog.h>
105 #include <sys/interrupt.h>
106 #include <sys/cpu.h>
107 #include <sys/pcq.h>
108 #include <sys/sysctl.h>
109 #include <sys/workqueue.h>
110 #include <sys/atomic.h>
111 
112 #include <sys/rndsource.h>
113 
114 #include <net/if.h>
115 #include <net/if_dl.h>
116 #include <net/if_media.h>
117 #include <net/if_ether.h>
118 
119 #include <net/bpf.h>
120 
121 #include <net/rss_config.h>
122 
123 #include <netinet/in.h>			/* XXX for struct ip */
124 #include <netinet/in_systm.h>		/* XXX for struct ip */
125 #include <netinet/ip.h>			/* XXX for struct ip */
126 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
127 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
128 
129 #include <sys/bus.h>
130 #include <sys/intr.h>
131 #include <machine/endian.h>
132 
133 #include <dev/mii/mii.h>
134 #include <dev/mii/mdio.h>
135 #include <dev/mii/miivar.h>
136 #include <dev/mii/miidevs.h>
137 #include <dev/mii/mii_bitbang.h>
138 #include <dev/mii/ikphyreg.h>
139 #include <dev/mii/igphyreg.h>
140 #include <dev/mii/igphyvar.h>
141 #include <dev/mii/inbmphyreg.h>
142 #include <dev/mii/ihphyreg.h>
143 #include <dev/mii/makphyreg.h>
144 
145 #include <dev/pci/pcireg.h>
146 #include <dev/pci/pcivar.h>
147 #include <dev/pci/pcidevs.h>
148 
149 #include <dev/pci/if_wmreg.h>
150 #include <dev/pci/if_wmvar.h>
151 
152 #ifdef WM_DEBUG
153 #define	WM_DEBUG_LINK		__BIT(0)
154 #define	WM_DEBUG_TX		__BIT(1)
155 #define	WM_DEBUG_RX		__BIT(2)
156 #define	WM_DEBUG_GMII		__BIT(3)
157 #define	WM_DEBUG_MANAGE		__BIT(4)
158 #define	WM_DEBUG_NVM		__BIT(5)
159 #define	WM_DEBUG_INIT		__BIT(6)
160 #define	WM_DEBUG_LOCK		__BIT(7)
161 
162 #if 0
163 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
164 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
165 	WM_DEBUG_LOCK
166 #endif
167 
168 #define	DPRINTF(sc, x, y)			  \
169 	do {					  \
170 		if ((sc)->sc_debug & (x))	  \
171 			printf y;		  \
172 	} while (0)
173 #else
174 #define	DPRINTF(sc, x, y)	__nothing
175 #endif /* WM_DEBUG */
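
/*
 * Illustrative use of DPRINTF() (a sketch, not code from this driver):
 * the first argument supplies the softc whose sc_debug mask is checked,
 * the second is one of the WM_DEBUG_* bits, and the third is a complete
 * parenthesized printf() argument list.
 */
#if 0
	DPRINTF(sc, WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
#endif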
176 
177 #ifdef NET_MPSAFE
178 #define WM_MPSAFE	1
179 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
180 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
181 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
182 #else
183 #define WM_CALLOUT_FLAGS	0
184 #define WM_SOFTINT_FLAGS	0
185 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
186 #endif
187 
188 #define WM_WORKQUEUE_PRI PRI_SOFTNET
189 
190 /*
191  * The maximum number of interrupts this device driver supports.
192  */
193 #define WM_MAX_NQUEUEINTR	16
194 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
195 
196 #ifndef WM_DISABLE_MSI
197 #define	WM_DISABLE_MSI 0
198 #endif
199 #ifndef WM_DISABLE_MSIX
200 #define	WM_DISABLE_MSIX 0
201 #endif
202 
203 int wm_disable_msi = WM_DISABLE_MSI;
204 int wm_disable_msix = WM_DISABLE_MSIX;
205 
206 #ifndef WM_WATCHDOG_TIMEOUT
207 #define WM_WATCHDOG_TIMEOUT 5
208 #endif
209 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
210 
211 /*
212  * Transmit descriptor list size.  Due to errata, we can only have
213  * 256 hardware descriptors in the ring on < 82544, but we use 4096
214  * on >= 82544. We tell the upper layers that they can queue a lot
215  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
216  * of them at a time.
217  *
218  * We allow up to 64 DMA segments per packet.  Pathological packet
219  * chains containing many small mbufs have been observed in zero-copy
220  * situations with jumbo frames.  If an mbuf chain needs more than 64 DMA
221  * segments, m_defrag() is called to compact it (see the sketch below).
222  */
223 #define	WM_NTXSEGS		64
224 #define	WM_IFQUEUELEN		256
225 #define	WM_TXQUEUELEN_MAX	64
226 #define	WM_TXQUEUELEN_MAX_82547	16
227 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
228 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
229 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
230 #define	WM_NTXDESC_82542	256
231 #define	WM_NTXDESC_82544	4096
232 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
233 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
234 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
235 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
236 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
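
/*
 * A minimal sketch (assumed local names; not the driver's exact code) of
 * the m_defrag() fallback described above: when an mbuf chain would need
 * more than WM_NTXSEGS DMA segments, bus_dmamap_load_mbuf() fails with
 * EFBIG and the chain is compacted before one retry.
 */
#if 0
	error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m = m_defrag(m0, M_NOWAIT);
		if (m != NULL) {
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		}
	}
#endif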
237 
238 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
239 
240 #define	WM_TXINTERQSIZE		256
241 
242 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
243 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
244 #endif
245 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
246 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
247 #endif
248 
249 /*
250  * Receive descriptor list size.  We have one Rx buffer for a normal-
251  * sized packet; a full-sized jumbo packet consumes 5 Rx buffers.  We
252  * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
253  * which gives us room for about 50 jumbo packets (256 / 5 ~= 51).
254  */
255 #define	WM_NRXDESC		256U
256 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
257 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
258 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
259 
260 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
261 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
262 #endif
263 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
264 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
265 #endif
266 
267 typedef union txdescs {
268 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
269 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
270 } txdescs_t;
271 
272 typedef union rxdescs {
273 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
274 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
275 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
276 } rxdescs_t;
277 
278 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
279 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
280 
281 /*
282  * Software state for transmit jobs.
283  */
284 struct wm_txsoft {
285 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
286 	bus_dmamap_t txs_dmamap;	/* our DMA map */
287 	int txs_firstdesc;		/* first descriptor in packet */
288 	int txs_lastdesc;		/* last descriptor in packet */
289 	int txs_ndesc;			/* # of descriptors used */
290 };
291 
292 /*
293  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
294  * buffer and a DMA map. For packets which fill more than one buffer, we chain
295  * them together.
296  */
297 struct wm_rxsoft {
298 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
299 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
300 };
301 
302 #define WM_LINKUP_TIMEOUT	50
303 
304 static uint16_t swfwphysem[] = {
305 	SWFW_PHY0_SM,
306 	SWFW_PHY1_SM,
307 	SWFW_PHY2_SM,
308 	SWFW_PHY3_SM
309 };
310 
311 static const uint32_t wm_82580_rxpbs_table[] = {
312 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
313 };
314 
315 struct wm_softc;
316 
317 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
318 #if !defined(WM_EVENT_COUNTERS)
319 #define WM_EVENT_COUNTERS 1
320 #endif
321 #endif
322 
323 #ifdef WM_EVENT_COUNTERS
324 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
325 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
326 	struct evcnt qname##_ev_##evname;
327 
328 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
329 	do {								\
330 		snprintf((q)->qname##_##evname##_evcnt_name,		\
331 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
332 		    "%s%02d%s", #qname, (qnum), #evname);		\
333 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
334 		    (evtype), NULL, (xname),				\
335 		    (q)->qname##_##evname##_evcnt_name);		\
336 	} while (0)
337 
338 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
339 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
340 
341 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
342 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
343 
344 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
345 	evcnt_detach(&(q)->qname##_ev_##evname);
346 #endif /* WM_EVENT_COUNTERS */
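
/*
 * For illustration, WM_Q_EVCNT_DEFINE(txq, txdw) declares roughly the two
 * members sketched below; WM_Q_EVCNT_ATTACH() then snprintf()s a name
 * such as "txq00txdw" into the buffer and attaches the counter.  Note
 * that the buffer is sized from the literal string "qname##XX##evname"
 * (## is not pasted inside a string literal), which is long enough for
 * the short queue/event names this driver uses.
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif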
347 
348 struct wm_txqueue {
349 	kmutex_t *txq_lock;		/* lock for tx operations */
350 
351 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
352 
353 	/* Software state for the transmit descriptors. */
354 	int txq_num;			/* must be a power of two */
355 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
356 
357 	/* TX control data structures. */
358 	int txq_ndesc;			/* must be a power of two */
359 	size_t txq_descsize;		/* size of a Tx descriptor */
360 	txdescs_t *txq_descs_u;
361 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
362 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
363 	int txq_desc_rseg;		/* real number of control data segments */
364 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
365 #define	txq_descs	txq_descs_u->sctxu_txdescs
366 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
367 
368 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
369 
370 	int txq_free;			/* number of free Tx descriptors */
371 	int txq_next;			/* next ready Tx descriptor */
372 
373 	int txq_sfree;			/* number of free Tx jobs */
374 	int txq_snext;			/* next free Tx job */
375 	int txq_sdirty;			/* dirty Tx jobs */
376 
377 	/* These 4 variables are used only on the 82547. */
378 	int txq_fifo_size;		/* Tx FIFO size */
379 	int txq_fifo_head;		/* current head of FIFO */
380 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
381 	int txq_fifo_stall;		/* Tx FIFO is stalled */
382 
383 	/*
384 	 * When ncpu > the number of Tx queues, a Tx queue is shared by
385 	 * multiple CPUs; this pcq mediates without blocking (see sketch below).
386 	 */
387 	pcq_t *txq_interq;
388 
389 	/*
390 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
391 	 * manage the Tx H/W queue's busy flag.
392 	 */
393 	int txq_flags;			/* flags for H/W queue, see below */
394 #define	WM_TXQ_NO_SPACE		0x1
395 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
396 
397 	bool txq_stopping;
398 
399 	bool txq_sending;
400 	time_t txq_lastsent;
401 
402 	/* Checksum flags used for previous packet */
403 	uint32_t	txq_last_hw_cmd;
404 	uint8_t		txq_last_hw_fields;
405 	uint16_t	txq_last_hw_ipcs;
406 	uint16_t	txq_last_hw_tucs;
407 
408 	uint32_t txq_packets;		/* for AIM */
409 	uint32_t txq_bytes;		/* for AIM */
410 #ifdef WM_EVENT_COUNTERS
411 	/* TX event counters */
412 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
413 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
414 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
415 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
416 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
417 					    /* XXX not used? */
418 
419 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
420 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
421 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
422 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
423 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
424 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
425 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
426 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
427 					    /* other than toomanyseg */
428 
429 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
430 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
431 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
432 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skipped writing cksum context */
433 
434 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
435 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
436 #endif /* WM_EVENT_COUNTERS */
437 };
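
/*
 * A sketch (with assumed local variable names) of how txq_interq is used
 * via the pcq(9) API: any CPU may enqueue a packet without blocking, and
 * the CPU servicing the queue dequeues under txq_lock.
 */
#if 0
	/* Producer, any CPU: */
	if (!pcq_put(txq->txq_interq, m))
		m_freem(m);		/* queue full; drop the packet */

	/* Consumer, with txq_lock held: */
	while ((m = pcq_get(txq->txq_interq)) != NULL) {
		/* load m onto the hardware Tx ring */
	}
#endif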
438 
439 struct wm_rxqueue {
440 	kmutex_t *rxq_lock;		/* lock for rx operations */
441 
442 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
443 
444 	/* Software state for the receive descriptors. */
445 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
446 
447 	/* RX control data structures. */
448 	int rxq_ndesc;			/* must be a power of two */
449 	size_t rxq_descsize;		/* size of an Rx descriptor */
450 	rxdescs_t *rxq_descs_u;
451 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
452 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
453 	int rxq_desc_rseg;		/* real number of control data segments */
454 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
455 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
456 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
457 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
458 
459 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
460 
461 	int rxq_ptr;			/* next ready Rx desc/queue ent */
462 	int rxq_discard;
463 	int rxq_len;
464 	struct mbuf *rxq_head;
465 	struct mbuf *rxq_tail;
466 	struct mbuf **rxq_tailp;
467 
468 	bool rxq_stopping;
469 
470 	uint32_t rxq_packets;		/* for AIM */
471 	uint32_t rxq_bytes;		/* for AIM */
472 #ifdef WM_EVENT_COUNTERS
473 	/* RX event counters */
474 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
475 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
476 
477 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
478 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
479 #endif
480 };
481 
482 struct wm_queue {
483 	int wmq_id;			/* index of TX/RX queues */
484 	int wmq_intr_idx;		/* index into the MSI-X table */
485 
486 	uint32_t wmq_itr;		/* interrupt interval per queue. */
487 	bool wmq_set_itr;
488 
489 	struct wm_txqueue wmq_txq;
490 	struct wm_rxqueue wmq_rxq;
491 	char sysctlname[32];		/* Name for sysctl */
492 
493 	bool wmq_txrx_use_workqueue;
494 	struct work wmq_cookie;
495 	void *wmq_si;
496 };
497 
498 struct wm_phyop {
499 	int (*acquire)(struct wm_softc *);
500 	void (*release)(struct wm_softc *);
501 	int (*readreg_locked)(device_t, int, int, uint16_t *);
502 	int (*writereg_locked)(device_t, int, int, uint16_t);
503 	int reset_delay_us;
504 	bool no_errprint;
505 };
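
/*
 * Typical locked PHY access through wm_phyop (an illustrative sketch;
 * the real callers are the wm_gmii_*_readreg()/_writereg() functions):
 */
#if 0
	if (sc->phy.acquire(sc) != 0)
		return;				/* semaphore not obtained */
	rv = sc->phy.readreg_locked(dev, phy, reg, &val);
	sc->phy.release(sc);
#endif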
506 
507 struct wm_nvmop {
508 	int (*acquire)(struct wm_softc *);
509 	void (*release)(struct wm_softc *);
510 	int (*read)(struct wm_softc *, int, int, uint16_t *);
511 };
512 
513 /*
514  * Software state per device.
515  */
516 struct wm_softc {
517 	device_t sc_dev;		/* generic device information */
518 	bus_space_tag_t sc_st;		/* bus space tag */
519 	bus_space_handle_t sc_sh;	/* bus space handle */
520 	bus_size_t sc_ss;		/* bus space size */
521 	bus_space_tag_t sc_iot;		/* I/O space tag */
522 	bus_space_handle_t sc_ioh;	/* I/O space handle */
523 	bus_size_t sc_ios;		/* I/O space size */
524 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
525 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
526 	bus_size_t sc_flashs;		/* flash registers space size */
527 	off_t sc_flashreg_offset;	/*
528 					 * offset to flash registers from
529 					 * start of BAR
530 					 */
531 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
532 
533 	struct ethercom sc_ethercom;	/* ethernet common data */
534 	struct mii_data sc_mii;		/* MII/media information */
535 
536 	pci_chipset_tag_t sc_pc;
537 	pcitag_t sc_pcitag;
538 	int sc_bus_speed;		/* PCI/PCIX bus speed */
539 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
540 
541 	uint16_t sc_pcidevid;		/* PCI device ID */
542 	wm_chip_type sc_type;		/* MAC type */
543 	int sc_rev;			/* MAC revision */
544 	wm_phy_type sc_phytype;		/* PHY type */
545 	uint8_t sc_sfptype;		/* SFP type */
546 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
547 #define	WM_MEDIATYPE_UNKNOWN		0x00
548 #define	WM_MEDIATYPE_FIBER		0x01
549 #define	WM_MEDIATYPE_COPPER		0x02
550 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
551 	int sc_funcid;			/* unit number of the chip (0 to 3) */
552 	int sc_flags;			/* flags; see below */
553 	u_short sc_if_flags;		/* last if_flags */
554 	int sc_ec_capenable;		/* last ec_capenable */
555 	int sc_flowflags;		/* 802.3x flow control flags */
556 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
557 	int sc_align_tweak;
558 
559 	void *sc_ihs[WM_MAX_NINTR];	/*
560 					 * interrupt cookie.
561 					 * - legacy and msi use sc_ihs[0] only
562 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
563 					 */
564 	pci_intr_handle_t *sc_intrs;	/*
565 					 * legacy and msi use sc_intrs[0] only
566 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
567 					 */
568 	int sc_nintrs;			/* number of interrupts */
569 
570 	int sc_link_intr_idx;		/* index into the MSI-X table */
571 
572 	callout_t sc_tick_ch;		/* tick callout */
573 	bool sc_core_stopping;
574 
575 	int sc_nvm_ver_major;
576 	int sc_nvm_ver_minor;
577 	int sc_nvm_ver_build;
578 	int sc_nvm_addrbits;		/* NVM address bits */
579 	unsigned int sc_nvm_wordsize;	/* NVM word size */
580 	int sc_ich8_flash_base;
581 	int sc_ich8_flash_bank_size;
582 	int sc_nvm_k1_enabled;
583 
584 	int sc_nqueues;
585 	struct wm_queue *sc_queue;
586 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
587 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
588 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
589 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
590 	struct workqueue *sc_queue_wq;
591 	bool sc_txrx_use_workqueue;
592 
593 	int sc_affinity_offset;
594 
595 #ifdef WM_EVENT_COUNTERS
596 	/* Event counters. */
597 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
598 
599 	/* WM_T_82542_2_1 only */
600 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
601 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
602 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
603 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
604 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
605 #endif /* WM_EVENT_COUNTERS */
606 
607 	struct sysctllog *sc_sysctllog;
608 
609 	/* This variable is used only on the 82547. */
610 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
611 
612 	uint32_t sc_ctrl;		/* prototype CTRL register */
613 #if 0
614 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
615 #endif
616 	uint32_t sc_icr;		/* prototype interrupt bits */
617 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
618 	uint32_t sc_tctl;		/* prototype TCTL register */
619 	uint32_t sc_rctl;		/* prototype RCTL register */
620 	uint32_t sc_txcw;		/* prototype TXCW register */
621 	uint32_t sc_tipg;		/* prototype TIPG register */
622 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
623 	uint32_t sc_pba;		/* prototype PBA register */
624 
625 	int sc_tbi_linkup;		/* TBI link status */
626 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
627 	int sc_tbi_serdes_ticks;	/* tbi ticks */
628 
629 	int sc_mchash_type;		/* multicast filter offset */
630 
631 	krndsource_t rnd_source;	/* random source */
632 
633 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
634 
635 	kmutex_t *sc_core_lock;		/* lock for softc operations */
636 	kmutex_t *sc_ich_phymtx;	/*
637 					 * 82574/82583/ICH/PCH specific PHY
638 					 * mutex. For 82574/82583, the mutex
639 					 * is used for both PHY and NVM.
640 					 */
641 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
642 
643 	struct wm_phyop phy;
644 	struct wm_nvmop nvm;
645 #ifdef WM_DEBUG
646 	uint32_t sc_debug;
647 #endif
648 };
649 
650 #define WM_CORE_LOCK(_sc)						\
651 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
652 #define WM_CORE_UNLOCK(_sc)						\
653 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
654 #define WM_CORE_LOCKED(_sc)						\
655 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
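
/*
 * Illustrative use (a sketch): the lock macros are no-ops until
 * sc_core_lock has been created, and WM_CORE_LOCKED() is the form to
 * assert with.
 */
#if 0
	WM_CORE_LOCK(sc);
	/* ... modify shared softc state ... */
	WM_CORE_UNLOCK(sc);

	KASSERT(WM_CORE_LOCKED(sc));	/* in functions requiring the lock */
#endif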
656 
657 #define	WM_RXCHAIN_RESET(rxq)						\
658 do {									\
659 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
660 	*(rxq)->rxq_tailp = NULL;					\
661 	(rxq)->rxq_len = 0;						\
662 } while (/*CONSTCOND*/0)
663 
664 #define	WM_RXCHAIN_LINK(rxq, m)						\
665 do {									\
666 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
667 	(rxq)->rxq_tailp = &(m)->m_next;				\
668 } while (/*CONSTCOND*/0)
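
/*
 * These macros keep rxq_tailp aimed at the m_next pointer of the last
 * mbuf (or at rxq_head when the chain is empty), so appending each
 * received fragment is O(1).  A sketch of assembling a multi-buffer
 * packet:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
#endif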
669 
670 #ifdef WM_EVENT_COUNTERS
671 #ifdef __HAVE_ATOMIC64_LOADSTORE
672 #define	WM_EVCNT_INCR(ev)						\
673 	atomic_store_relaxed(&((ev)->ev_count),				\
674 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
675 #define	WM_EVCNT_ADD(ev, val)						\
676 	atomic_store_relaxed(&((ev)->ev_count),				\
677 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
678 #else
679 #define	WM_EVCNT_INCR(ev)						\
680 	((ev)->ev_count)++
681 #define	WM_EVCNT_ADD(ev, val)						\
682 	(ev)->ev_count += (val)
683 #endif
684 
685 #define WM_Q_EVCNT_INCR(qname, evname)			\
686 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
687 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
688 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
689 #else /* !WM_EVENT_COUNTERS */
690 #define	WM_EVCNT_INCR(ev)	/* nothing */
691 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
692 
693 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
694 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
695 #endif /* !WM_EVENT_COUNTERS */
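
/*
 * Example (illustrative): WM_Q_EVCNT_INCR(txq, txdw) expands to
 * WM_EVCNT_INCR(&txq->txq_ev_txdw).  With __HAVE_ATOMIC64_LOADSTORE the
 * increment is a relaxed 64-bit load/store pair rather than an atomic
 * add; that is cheap and keeps readers from seeing a torn 64-bit value,
 * assuming each counter has only one updater at a time.
 */
#if 0
	WM_Q_EVCNT_INCR(txq, txdw);	/* == WM_EVCNT_INCR(&txq->txq_ev_txdw) */
#endif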
696 
697 #define	CSR_READ(sc, reg)						\
698 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
699 #define	CSR_WRITE(sc, reg, val)						\
700 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
701 #define	CSR_WRITE_FLUSH(sc)						\
702 	(void)CSR_READ((sc), WMREG_STATUS)
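
/*
 * CSR_WRITE_FLUSH() forces posted PCI writes to the device by reading a
 * harmless register (STATUS).  A sketch of the usual pattern when a
 * write must reach the chip before a timed wait:
 */
#if 0
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask all interrupts */
	CSR_WRITE_FLUSH(sc);
	delay(1000);
#endif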
703 
704 #define ICH8_FLASH_READ32(sc, reg)					\
705 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
706 	    (reg) + sc->sc_flashreg_offset)
707 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
708 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
709 	    (reg) + sc->sc_flashreg_offset, (data))
710 
711 #define ICH8_FLASH_READ16(sc, reg)					\
712 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
713 	    (reg) + sc->sc_flashreg_offset)
714 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
715 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
716 	    (reg) + sc->sc_flashreg_offset, (data))
717 
718 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
719 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
720 
721 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
722 #define	WM_CDTXADDR_HI(txq, x)						\
723 	(sizeof(bus_addr_t) == 8 ?					\
724 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
725 
726 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
727 #define	WM_CDRXADDR_HI(rxq, x)						\
728 	(sizeof(bus_addr_t) == 8 ?					\
729 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
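
/*
 * The _HI macros evaluate to 0 when bus_addr_t is 32 bits wide, so the
 * same code programs the 64-bit descriptor base registers everywhere.
 * A sketch (register names from if_wmreg.h) of pointing the chip at a
 * Tx ring:
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
#endif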
730 
731 /*
732  * Register read/write functions.
733  * Other than CSR_{READ|WRITE}().
734  */
735 #if 0
736 static inline uint32_t wm_io_read(struct wm_softc *, int);
737 #endif
738 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
739 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
740     uint32_t, uint32_t);
741 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
742 
743 /*
744  * Descriptor sync/init functions.
745  */
746 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
747 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
748 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
749 
750 /*
751  * Device driver interface functions and commonly used functions.
752  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
753  */
754 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
755 static int	wm_match(device_t, cfdata_t, void *);
756 static void	wm_attach(device_t, device_t, void *);
757 static int	wm_detach(device_t, int);
758 static bool	wm_suspend(device_t, const pmf_qual_t *);
759 static bool	wm_resume(device_t, const pmf_qual_t *);
760 static void	wm_watchdog(struct ifnet *);
761 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
762     uint16_t *);
763 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
764     uint16_t *);
765 static void	wm_tick(void *);
766 static int	wm_ifflags_cb(struct ethercom *);
767 static int	wm_ioctl(struct ifnet *, u_long, void *);
768 /* MAC address related */
769 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
770 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
771 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
772 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
773 static int	wm_rar_count(struct wm_softc *);
774 static void	wm_set_filter(struct wm_softc *);
775 /* Reset and init related */
776 static void	wm_set_vlan(struct wm_softc *);
777 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
778 static void	wm_get_auto_rd_done(struct wm_softc *);
779 static void	wm_lan_init_done(struct wm_softc *);
780 static void	wm_get_cfg_done(struct wm_softc *);
781 static int	wm_phy_post_reset(struct wm_softc *);
782 static int	wm_write_smbus_addr(struct wm_softc *);
783 static int	wm_init_lcd_from_nvm(struct wm_softc *);
784 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
785 static void	wm_initialize_hardware_bits(struct wm_softc *);
786 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
787 static int	wm_reset_phy(struct wm_softc *);
788 static void	wm_flush_desc_rings(struct wm_softc *);
789 static void	wm_reset(struct wm_softc *);
790 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
791 static void	wm_rxdrain(struct wm_rxqueue *);
792 static void	wm_init_rss(struct wm_softc *);
793 static void	wm_adjust_qnum(struct wm_softc *, int);
794 static inline bool	wm_is_using_msix(struct wm_softc *);
795 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
796 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
797 static int	wm_setup_legacy(struct wm_softc *);
798 static int	wm_setup_msix(struct wm_softc *);
799 static int	wm_init(struct ifnet *);
800 static int	wm_init_locked(struct ifnet *);
801 static void	wm_init_sysctls(struct wm_softc *);
802 static void	wm_unset_stopping_flags(struct wm_softc *);
803 static void	wm_set_stopping_flags(struct wm_softc *);
804 static void	wm_stop(struct ifnet *, int);
805 static void	wm_stop_locked(struct ifnet *, bool, bool);
806 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
807 static void	wm_82547_txfifo_stall(void *);
808 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
809 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
810 /* DMA related */
811 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
812 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
813 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
814 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
815     struct wm_txqueue *);
816 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
817 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
818 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
819     struct wm_rxqueue *);
820 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
821 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
822 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
823 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
824 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
825 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
826 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
827     struct wm_txqueue *);
828 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
829     struct wm_rxqueue *);
830 static int	wm_alloc_txrx_queues(struct wm_softc *);
831 static void	wm_free_txrx_queues(struct wm_softc *);
832 static int	wm_init_txrx_queues(struct wm_softc *);
833 /* Start */
834 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
835     struct wm_txsoft *, uint32_t *, uint8_t *);
836 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
837 static void	wm_start(struct ifnet *);
838 static void	wm_start_locked(struct ifnet *);
839 static int	wm_transmit(struct ifnet *, struct mbuf *);
840 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
841 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
842 		    bool);
843 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
844     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
845 static void	wm_nq_start(struct ifnet *);
846 static void	wm_nq_start_locked(struct ifnet *);
847 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
848 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
849 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
850 		    bool);
851 static void	wm_deferred_start_locked(struct wm_txqueue *);
852 static void	wm_handle_queue(void *);
853 static void	wm_handle_queue_work(struct work *, void *);
854 /* Interrupt */
855 static bool	wm_txeof(struct wm_txqueue *, u_int);
856 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
857 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
858 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
859 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
860 static void	wm_linkintr(struct wm_softc *, uint32_t);
861 static int	wm_intr_legacy(void *);
862 static inline void	wm_txrxintr_disable(struct wm_queue *);
863 static inline void	wm_txrxintr_enable(struct wm_queue *);
864 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
865 static int	wm_txrxintr_msix(void *);
866 static int	wm_linkintr_msix(void *);
867 
868 /*
869  * Media related.
870  * GMII, SGMII, TBI, SERDES and SFP.
871  */
872 /* Common */
873 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
874 /* GMII related */
875 static void	wm_gmii_reset(struct wm_softc *);
876 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
877 static int	wm_get_phy_id_82575(struct wm_softc *);
878 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
879 static int	wm_gmii_mediachange(struct ifnet *);
880 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
881 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
882 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
883 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
884 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
885 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
886 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
887 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
888 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
889 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
890 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
891 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
892 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
893 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
894 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
895 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
896 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
897 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
898 	bool);
899 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
900 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
901 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
902 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
903 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
904 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
905 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
906 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
907 static void	wm_gmii_statchg(struct ifnet *);
908 /*
909  * Kumeran related (80003, ICH* and PCH*).
910  * These functions access Kumeran-specific registers rather than MII
911  * registers.
912  */
913 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
914 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
915 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
916 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
917 /* EMI register related */
918 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
919 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
920 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
921 /* SGMII */
922 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
923 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
924 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
925 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
926 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
927 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
928 /* TBI related */
929 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
930 static void	wm_tbi_mediainit(struct wm_softc *);
931 static int	wm_tbi_mediachange(struct ifnet *);
932 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
933 static int	wm_check_for_link(struct wm_softc *);
934 static void	wm_tbi_tick(struct wm_softc *);
935 /* SERDES related */
936 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
937 static int	wm_serdes_mediachange(struct ifnet *);
938 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
939 static void	wm_serdes_tick(struct wm_softc *);
940 /* SFP related */
941 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
942 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
943 
944 /*
945  * NVM related.
946  * Microwire, SPI (w/wo EERD) and Flash.
947  */
948 /* Misc functions */
949 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
950 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
951 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
952 /* Microwire */
953 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
954 /* SPI */
955 static int	wm_nvm_ready_spi(struct wm_softc *);
956 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
957 /* Using with EERD */
958 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
959 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
960 /* Flash */
961 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
962     unsigned int *);
963 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
964 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
965 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
966     uint32_t *);
967 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
968 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
969 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
970 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
971 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
972 /* iNVM */
973 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
974 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
975 /* Lock, detecting NVM type, validate checksum and read */
976 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
977 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
978 static int	wm_nvm_validate_checksum(struct wm_softc *);
979 static void	wm_nvm_version_invm(struct wm_softc *);
980 static void	wm_nvm_version(struct wm_softc *);
981 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
982 
983 /*
984  * Hardware semaphores.
985  * Very complex: each hardware generation uses a different mechanism.
986  */
987 static int	wm_get_null(struct wm_softc *);
988 static void	wm_put_null(struct wm_softc *);
989 static int	wm_get_eecd(struct wm_softc *);
990 static void	wm_put_eecd(struct wm_softc *);
991 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
992 static void	wm_put_swsm_semaphore(struct wm_softc *);
993 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
994 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
995 static int	wm_get_nvm_80003(struct wm_softc *);
996 static void	wm_put_nvm_80003(struct wm_softc *);
997 static int	wm_get_nvm_82571(struct wm_softc *);
998 static void	wm_put_nvm_82571(struct wm_softc *);
999 static int	wm_get_phy_82575(struct wm_softc *);
1000 static void	wm_put_phy_82575(struct wm_softc *);
1001 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
1002 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
1003 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
1004 static void	wm_put_swflag_ich8lan(struct wm_softc *);
1005 static int	wm_get_nvm_ich8lan(struct wm_softc *);
1006 static void	wm_put_nvm_ich8lan(struct wm_softc *);
1007 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
1008 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
1009 
1010 /*
1011  * Management mode and power management related subroutines.
1012  * BMC, AMT, suspend/resume and EEE.
1013  */
1014 #if 0
1015 static int	wm_check_mng_mode(struct wm_softc *);
1016 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
1017 static int	wm_check_mng_mode_82574(struct wm_softc *);
1018 static int	wm_check_mng_mode_generic(struct wm_softc *);
1019 #endif
1020 static int	wm_enable_mng_pass_thru(struct wm_softc *);
1021 static bool	wm_phy_resetisblocked(struct wm_softc *);
1022 static void	wm_get_hw_control(struct wm_softc *);
1023 static void	wm_release_hw_control(struct wm_softc *);
1024 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1025 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
1026 static void	wm_init_manageability(struct wm_softc *);
1027 static void	wm_release_manageability(struct wm_softc *);
1028 static void	wm_get_wakeup(struct wm_softc *);
1029 static int	wm_ulp_disable(struct wm_softc *);
1030 static int	wm_enable_phy_wakeup(struct wm_softc *);
1031 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1032 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
1033 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
1034 static void	wm_enable_wakeup(struct wm_softc *);
1035 static void	wm_disable_aspm(struct wm_softc *);
1036 /* LPLU (Low Power Link Up) */
1037 static void	wm_lplu_d0_disable(struct wm_softc *);
1038 /* EEE */
1039 static int	wm_set_eee_i350(struct wm_softc *);
1040 static int	wm_set_eee_pchlan(struct wm_softc *);
1041 static int	wm_set_eee(struct wm_softc *);
1042 
1043 /*
1044  * Workarounds (mainly PHY related).
1045  * Most PHY workarounds live in the PHY drivers; the remainder are here.
1046  */
1047 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1048 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1049 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1050 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1051 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1052 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1053 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1054 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1055 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
1056 static int	wm_k1_workaround_lv(struct wm_softc *);
1057 static int	wm_link_stall_workaround_hv(struct wm_softc *);
1058 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
1059 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
1060 static void	wm_reset_init_script_82575(struct wm_softc *);
1061 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
1062 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
1063 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1064 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1065 static int	wm_pll_workaround_i210(struct wm_softc *);
1066 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
1067 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
1068 static void	wm_set_linkdown_discard(struct wm_softc *);
1069 static void	wm_clear_linkdown_discard(struct wm_softc *);
1070 
1071 static int	wm_sysctl_tdh_handler(SYSCTLFN_PROTO);
1072 static int	wm_sysctl_tdt_handler(SYSCTLFN_PROTO);
1073 #ifdef WM_DEBUG
1074 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
1075 #endif
1076 
1077 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1078     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1079 
1080 /*
1081  * Devices supported by this driver.
1082  */
1083 static const struct wm_product {
1084 	pci_vendor_id_t		wmp_vendor;
1085 	pci_product_id_t	wmp_product;
1086 	const char		*wmp_name;
1087 	wm_chip_type		wmp_type;
1088 	uint32_t		wmp_flags;
1089 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
1090 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
1091 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
1092 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
1093 #define WMP_MEDIATYPE(x)	((x) & 0x03)
1094 } wm_products[] = {
1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
1096 	  "Intel i82542 1000BASE-X Ethernet",
1097 	  WM_T_82542_2_1,	WMP_F_FIBER },
1098 
1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
1100 	  "Intel i82543GC 1000BASE-X Ethernet",
1101 	  WM_T_82543,		WMP_F_FIBER },
1102 
1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
1104 	  "Intel i82543GC 1000BASE-T Ethernet",
1105 	  WM_T_82543,		WMP_F_COPPER },
1106 
1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1108 	  "Intel i82544EI 1000BASE-T Ethernet",
1109 	  WM_T_82544,		WMP_F_COPPER },
1110 
1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1112 	  "Intel i82544EI 1000BASE-X Ethernet",
1113 	  WM_T_82544,		WMP_F_FIBER },
1114 
1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1116 	  "Intel i82544GC 1000BASE-T Ethernet",
1117 	  WM_T_82544,		WMP_F_COPPER },
1118 
1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1120 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1121 	  WM_T_82544,		WMP_F_COPPER },
1122 
1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1124 	  "Intel i82540EM 1000BASE-T Ethernet",
1125 	  WM_T_82540,		WMP_F_COPPER },
1126 
1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1128 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1129 	  WM_T_82540,		WMP_F_COPPER },
1130 
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1132 	  "Intel i82540EP 1000BASE-T Ethernet",
1133 	  WM_T_82540,		WMP_F_COPPER },
1134 
1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1136 	  "Intel i82540EP 1000BASE-T Ethernet",
1137 	  WM_T_82540,		WMP_F_COPPER },
1138 
1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1140 	  "Intel i82540EP 1000BASE-T Ethernet",
1141 	  WM_T_82540,		WMP_F_COPPER },
1142 
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1144 	  "Intel i82545EM 1000BASE-T Ethernet",
1145 	  WM_T_82545,		WMP_F_COPPER },
1146 
1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1148 	  "Intel i82545GM 1000BASE-T Ethernet",
1149 	  WM_T_82545_3,		WMP_F_COPPER },
1150 
1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1152 	  "Intel i82545GM 1000BASE-X Ethernet",
1153 	  WM_T_82545_3,		WMP_F_FIBER },
1154 
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1156 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1157 	  WM_T_82545_3,		WMP_F_SERDES },
1158 
1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1160 	  "Intel i82546EB 1000BASE-T Ethernet",
1161 	  WM_T_82546,		WMP_F_COPPER },
1162 
1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1164 	  "Intel i82546EB 1000BASE-T Ethernet",
1165 	  WM_T_82546,		WMP_F_COPPER },
1166 
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1168 	  "Intel i82545EM 1000BASE-X Ethernet",
1169 	  WM_T_82545,		WMP_F_FIBER },
1170 
1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1172 	  "Intel i82546EB 1000BASE-X Ethernet",
1173 	  WM_T_82546,		WMP_F_FIBER },
1174 
1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1176 	  "Intel i82546GB 1000BASE-T Ethernet",
1177 	  WM_T_82546_3,		WMP_F_COPPER },
1178 
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1180 	  "Intel i82546GB 1000BASE-X Ethernet",
1181 	  WM_T_82546_3,		WMP_F_FIBER },
1182 
1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1184 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1185 	  WM_T_82546_3,		WMP_F_SERDES },
1186 
1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1188 	  "i82546GB quad-port Gigabit Ethernet",
1189 	  WM_T_82546_3,		WMP_F_COPPER },
1190 
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1192 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1193 	  WM_T_82546_3,		WMP_F_COPPER },
1194 
1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1196 	  "Intel PRO/1000MT (82546GB)",
1197 	  WM_T_82546_3,		WMP_F_COPPER },
1198 
1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1200 	  "Intel i82541EI 1000BASE-T Ethernet",
1201 	  WM_T_82541,		WMP_F_COPPER },
1202 
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1204 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1205 	  WM_T_82541,		WMP_F_COPPER },
1206 
1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1208 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1209 	  WM_T_82541,		WMP_F_COPPER },
1210 
1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1212 	  "Intel i82541ER 1000BASE-T Ethernet",
1213 	  WM_T_82541_2,		WMP_F_COPPER },
1214 
1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1216 	  "Intel i82541GI 1000BASE-T Ethernet",
1217 	  WM_T_82541_2,		WMP_F_COPPER },
1218 
1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1220 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1221 	  WM_T_82541_2,		WMP_F_COPPER },
1222 
1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1224 	  "Intel i82541PI 1000BASE-T Ethernet",
1225 	  WM_T_82541_2,		WMP_F_COPPER },
1226 
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1228 	  "Intel i82547EI 1000BASE-T Ethernet",
1229 	  WM_T_82547,		WMP_F_COPPER },
1230 
1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1232 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1233 	  WM_T_82547,		WMP_F_COPPER },
1234 
1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1236 	  "Intel i82547GI 1000BASE-T Ethernet",
1237 	  WM_T_82547_2,		WMP_F_COPPER },
1238 
1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1240 	  "Intel PRO/1000 PT (82571EB)",
1241 	  WM_T_82571,		WMP_F_COPPER },
1242 
1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1244 	  "Intel PRO/1000 PF (82571EB)",
1245 	  WM_T_82571,		WMP_F_FIBER },
1246 
1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1248 	  "Intel PRO/1000 PB (82571EB)",
1249 	  WM_T_82571,		WMP_F_SERDES },
1250 
1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1252 	  "Intel PRO/1000 QT (82571EB)",
1253 	  WM_T_82571,		WMP_F_COPPER },
1254 
1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1256 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1257 	  WM_T_82571,		WMP_F_COPPER },
1258 
1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1260 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1261 	  WM_T_82571,		WMP_F_COPPER },
1262 
1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1264 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1265 	  WM_T_82571,		WMP_F_SERDES },
1266 
1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1268 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1269 	  WM_T_82571,		WMP_F_SERDES },
1270 
1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1272 	  "Intel 82571EB Quad 1000baseX Ethernet",
1273 	  WM_T_82571,		WMP_F_FIBER },
1274 
1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1276 	  "Intel i82572EI 1000baseT Ethernet",
1277 	  WM_T_82572,		WMP_F_COPPER },
1278 
1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1280 	  "Intel i82572EI 1000baseX Ethernet",
1281 	  WM_T_82572,		WMP_F_FIBER },
1282 
1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1284 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1285 	  WM_T_82572,		WMP_F_SERDES },
1286 
1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1288 	  "Intel i82572EI 1000baseT Ethernet",
1289 	  WM_T_82572,		WMP_F_COPPER },
1290 
1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1292 	  "Intel i82573E",
1293 	  WM_T_82573,		WMP_F_COPPER },
1294 
1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1296 	  "Intel i82573E IAMT",
1297 	  WM_T_82573,		WMP_F_COPPER },
1298 
1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1300 	  "Intel i82573L Gigabit Ethernet",
1301 	  WM_T_82573,		WMP_F_COPPER },
1302 
1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1304 	  "Intel i82574L",
1305 	  WM_T_82574,		WMP_F_COPPER },
1306 
1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1308 	  "Intel i82574L",
1309 	  WM_T_82574,		WMP_F_COPPER },
1310 
1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1312 	  "Intel i82583V",
1313 	  WM_T_82583,		WMP_F_COPPER },
1314 
1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1316 	  "i80003 dual 1000baseT Ethernet",
1317 	  WM_T_80003,		WMP_F_COPPER },
1318 
1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1320 	  "i80003 dual 1000baseX Ethernet",
1321 	  WM_T_80003,		WMP_F_COPPER },
1322 
1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1324 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1325 	  WM_T_80003,		WMP_F_SERDES },
1326 
1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1328 	  "Intel i80003 1000baseT Ethernet",
1329 	  WM_T_80003,		WMP_F_COPPER },
1330 
1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1332 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1333 	  WM_T_80003,		WMP_F_SERDES },
1334 
1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1336 	  "Intel i82801H (M_AMT) LAN Controller",
1337 	  WM_T_ICH8,		WMP_F_COPPER },
1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1339 	  "Intel i82801H (AMT) LAN Controller",
1340 	  WM_T_ICH8,		WMP_F_COPPER },
1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1342 	  "Intel i82801H LAN Controller",
1343 	  WM_T_ICH8,		WMP_F_COPPER },
1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1345 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1346 	  WM_T_ICH8,		WMP_F_COPPER },
1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1348 	  "Intel i82801H (M) LAN Controller",
1349 	  WM_T_ICH8,		WMP_F_COPPER },
1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1351 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1352 	  WM_T_ICH8,		WMP_F_COPPER },
1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1354 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1355 	  WM_T_ICH8,		WMP_F_COPPER },
1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1357 	  "82567V-3 LAN Controller",
1358 	  WM_T_ICH8,		WMP_F_COPPER },
1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1360 	  "82801I (AMT) LAN Controller",
1361 	  WM_T_ICH9,		WMP_F_COPPER },
1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1363 	  "82801I 10/100 LAN Controller",
1364 	  WM_T_ICH9,		WMP_F_COPPER },
1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1366 	  "82801I (G) 10/100 LAN Controller",
1367 	  WM_T_ICH9,		WMP_F_COPPER },
1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1369 	  "82801I (GT) 10/100 LAN Controller",
1370 	  WM_T_ICH9,		WMP_F_COPPER },
1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1372 	  "82801I (C) LAN Controller",
1373 	  WM_T_ICH9,		WMP_F_COPPER },
1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1375 	  "82801I mobile LAN Controller",
1376 	  WM_T_ICH9,		WMP_F_COPPER },
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1378 	  "82801I mobile (V) LAN Controller",
1379 	  WM_T_ICH9,		WMP_F_COPPER },
1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1381 	  "82801I mobile (AMT) LAN Controller",
1382 	  WM_T_ICH9,		WMP_F_COPPER },
1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1384 	  "82567LM-4 LAN Controller",
1385 	  WM_T_ICH9,		WMP_F_COPPER },
1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1387 	  "82567LM-2 LAN Controller",
1388 	  WM_T_ICH10,		WMP_F_COPPER },
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1390 	  "82567LF-2 LAN Controller",
1391 	  WM_T_ICH10,		WMP_F_COPPER },
1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1393 	  "82567LM-3 LAN Controller",
1394 	  WM_T_ICH10,		WMP_F_COPPER },
1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1396 	  "82567LF-3 LAN Controller",
1397 	  WM_T_ICH10,		WMP_F_COPPER },
1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1399 	  "82567V-2 LAN Controller",
1400 	  WM_T_ICH10,		WMP_F_COPPER },
1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1402 	  "82567V-3? LAN Controller",
1403 	  WM_T_ICH10,		WMP_F_COPPER },
1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1405 	  "HANKSVILLE LAN Controller",
1406 	  WM_T_ICH10,		WMP_F_COPPER },
1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1408 	  "PCH LAN (82577LM) Controller",
1409 	  WM_T_PCH,		WMP_F_COPPER },
1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1411 	  "PCH LAN (82577LC) Controller",
1412 	  WM_T_PCH,		WMP_F_COPPER },
1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1414 	  "PCH LAN (82578DM) Controller",
1415 	  WM_T_PCH,		WMP_F_COPPER },
1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1417 	  "PCH LAN (82578DC) Controller",
1418 	  WM_T_PCH,		WMP_F_COPPER },
1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1420 	  "PCH2 LAN (82579LM) Controller",
1421 	  WM_T_PCH2,		WMP_F_COPPER },
1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1423 	  "PCH2 LAN (82579V) Controller",
1424 	  WM_T_PCH2,		WMP_F_COPPER },
1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1426 	  "82575EB dual-1000baseT Ethernet",
1427 	  WM_T_82575,		WMP_F_COPPER },
1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1429 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1430 	  WM_T_82575,		WMP_F_SERDES },
1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1432 	  "82575GB quad-1000baseT Ethernet",
1433 	  WM_T_82575,		WMP_F_COPPER },
1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1435 	  "82575GB quad-1000baseT Ethernet (PM)",
1436 	  WM_T_82575,		WMP_F_COPPER },
1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1438 	  "82576 1000BaseT Ethernet",
1439 	  WM_T_82576,		WMP_F_COPPER },
1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1441 	  "82576 1000BaseX Ethernet",
1442 	  WM_T_82576,		WMP_F_FIBER },
1443 
1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1445 	  "82576 gigabit Ethernet (SERDES)",
1446 	  WM_T_82576,		WMP_F_SERDES },
1447 
1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1449 	  "82576 quad-1000BaseT Ethernet",
1450 	  WM_T_82576,		WMP_F_COPPER },
1451 
1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1453 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1454 	  WM_T_82576,		WMP_F_COPPER },
1455 
1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1457 	  "82576 gigabit Ethernet",
1458 	  WM_T_82576,		WMP_F_COPPER },
1459 
1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1461 	  "82576 gigabit Ethernet (SERDES)",
1462 	  WM_T_82576,		WMP_F_SERDES },
1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1464 	  "82576 quad-gigabit Ethernet (SERDES)",
1465 	  WM_T_82576,		WMP_F_SERDES },
1466 
1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1468 	  "82580 1000BaseT Ethernet",
1469 	  WM_T_82580,		WMP_F_COPPER },
1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1471 	  "82580 1000BaseX Ethernet",
1472 	  WM_T_82580,		WMP_F_FIBER },
1473 
1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1475 	  "82580 1000BaseT Ethernet (SERDES)",
1476 	  WM_T_82580,		WMP_F_SERDES },
1477 
1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1479 	  "82580 gigabit Ethernet (SGMII)",
1480 	  WM_T_82580,		WMP_F_COPPER },
1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1482 	  "82580 dual-1000BaseT Ethernet",
1483 	  WM_T_82580,		WMP_F_COPPER },
1484 
1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1486 	  "82580 quad-1000BaseX Ethernet",
1487 	  WM_T_82580,		WMP_F_FIBER },
1488 
1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1490 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1491 	  WM_T_82580,		WMP_F_COPPER },
1492 
1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1494 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1495 	  WM_T_82580,		WMP_F_SERDES },
1496 
1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1498 	  "DH89XXCC 1000BASE-KX Ethernet",
1499 	  WM_T_82580,		WMP_F_SERDES },
1500 
1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1502 	  "DH89XXCC Gigabit Ethernet (SFP)",
1503 	  WM_T_82580,		WMP_F_SERDES },
1504 
1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1506 	  "I350 Gigabit Network Connection",
1507 	  WM_T_I350,		WMP_F_COPPER },
1508 
1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1510 	  "I350 Gigabit Fiber Network Connection",
1511 	  WM_T_I350,		WMP_F_FIBER },
1512 
1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1514 	  "I350 Gigabit Backplane Connection",
1515 	  WM_T_I350,		WMP_F_SERDES },
1516 
1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1518 	  "I350 Quad Port Gigabit Ethernet",
1519 	  WM_T_I350,		WMP_F_SERDES },
1520 
1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1522 	  "I350 Gigabit Connection",
1523 	  WM_T_I350,		WMP_F_COPPER },
1524 
1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1526 	  "I354 Gigabit Ethernet (KX)",
1527 	  WM_T_I354,		WMP_F_SERDES },
1528 
1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1530 	  "I354 Gigabit Ethernet (SGMII)",
1531 	  WM_T_I354,		WMP_F_COPPER },
1532 
1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1534 	  "I354 Gigabit Ethernet (2.5G)",
1535 	  WM_T_I354,		WMP_F_COPPER },
1536 
1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1538 	  "I210-T1 Ethernet Server Adapter",
1539 	  WM_T_I210,		WMP_F_COPPER },
1540 
1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1542 	  "I210 Ethernet (Copper OEM)",
1543 	  WM_T_I210,		WMP_F_COPPER },
1544 
1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1546 	  "I210 Ethernet (Copper IT)",
1547 	  WM_T_I210,		WMP_F_COPPER },
1548 
1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1550 	  "I210 Ethernet (Copper, FLASH less)",
1551 	  WM_T_I210,		WMP_F_COPPER },
1552 
1553 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1554 	  "I210 Gigabit Ethernet (Fiber)",
1555 	  WM_T_I210,		WMP_F_FIBER },
1556 
1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1558 	  "I210 Gigabit Ethernet (SERDES)",
1559 	  WM_T_I210,		WMP_F_SERDES },
1560 
1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1562 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1563 	  WM_T_I210,		WMP_F_SERDES },
1564 
1565 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1566 	  "I210 Gigabit Ethernet (SGMII)",
1567 	  WM_T_I210,		WMP_F_COPPER },
1568 
1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1570 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1571 	  WM_T_I210,		WMP_F_COPPER },
1572 
1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1574 	  "I211 Ethernet (COPPER)",
1575 	  WM_T_I211,		WMP_F_COPPER },
1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1577 	  "I217 V Ethernet Connection",
1578 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1580 	  "I217 LM Ethernet Connection",
1581 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1582 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1583 	  "I218 V Ethernet Connection",
1584 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1585 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1586 	  "I218 V Ethernet Connection",
1587 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1588 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1589 	  "I218 V Ethernet Connection",
1590 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1591 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1592 	  "I218 LM Ethernet Connection",
1593 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1594 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1595 	  "I218 LM Ethernet Connection",
1596 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1597 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1598 	  "I218 LM Ethernet Connection",
1599 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1600 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1601 	  "I219 LM Ethernet Connection",
1602 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1603 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1604 	  "I219 LM (2) Ethernet Connection",
1605 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1606 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1607 	  "I219 LM (3) Ethernet Connection",
1608 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1609 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1610 	  "I219 LM (4) Ethernet Connection",
1611 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1612 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1613 	  "I219 LM (5) Ethernet Connection",
1614 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1615 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1616 	  "I219 LM (6) Ethernet Connection",
1617 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1618 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1619 	  "I219 LM (7) Ethernet Connection",
1620 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1621 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1622 	  "I219 LM (8) Ethernet Connection",
1623 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1624 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1625 	  "I219 LM (9) Ethernet Connection",
1626 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1627 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1628 	  "I219 LM (10) Ethernet Connection",
1629 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1630 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1631 	  "I219 LM (11) Ethernet Connection",
1632 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1633 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1634 	  "I219 LM (12) Ethernet Connection",
1635 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1636 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1637 	  "I219 LM (13) Ethernet Connection",
1638 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1639 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1640 	  "I219 LM (14) Ethernet Connection",
1641 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1643 	  "I219 LM (15) Ethernet Connection",
1644 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1645 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM16,
1646 	  "I219 LM (16) Ethernet Connection",
1647 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1648 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM17,
1649 	  "I219 LM (17) Ethernet Connection",
1650 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1651 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM18,
1652 	  "I219 LM (18) Ethernet Connection",
1653 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM19,
1655 	  "I219 LM (19) Ethernet Connection",
1656 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1657 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1658 	  "I219 V Ethernet Connection",
1659 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1660 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1661 	  "I219 V (2) Ethernet Connection",
1662 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1663 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1664 	  "I219 V (4) Ethernet Connection",
1665 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1667 	  "I219 V (5) Ethernet Connection",
1668 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1669 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1670 	  "I219 V (6) Ethernet Connection",
1671 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1672 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1673 	  "I219 V (7) Ethernet Connection",
1674 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1675 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1676 	  "I219 V (8) Ethernet Connection",
1677 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1679 	  "I219 V (9) Ethernet Connection",
1680 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1681 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1682 	  "I219 V (10) Ethernet Connection",
1683 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1684 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1685 	  "I219 V (11) Ethernet Connection",
1686 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1687 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1688 	  "I219 V (12) Ethernet Connection",
1689 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1691 	  "I219 V (13) Ethernet Connection",
1692 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1693 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1694 	  "I219 V (14) Ethernet Connection",
1695 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1696 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V15,
1697 	  "I219 V (15) Ethernet Connection",
1698 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1699 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V16,
1700 	  "I219 V (16) Ethernet Connection",
1701 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V17,
1703 	  "I219 V (17) Ethernet Connection",
1704 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1705 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V18,
1706 	  "I219 V (18) Ethernet Connection",
1707 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1708 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V19,
1709 	  "I219 V (19) Ethernet Connection",
1710 	  WM_T_PCH_CNP,		WMP_F_COPPER },
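	/* Terminator: wm_lookup() stops at the first NULL wmp_name. */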
1711 	{ 0,			0,
1712 	  NULL,
1713 	  0,			0 },
1714 };
1715 
1716 /*
1717  * Register read/write functions
1718  * other than CSR_{READ|WRITE}().
1719  */
1720 
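/*
 * These use the chip's indirect I/O window: the register offset is
 * first written at BAR offset 0 and the data is then transferred
 * through BAR offset 4 (the IOADDR/IODATA pair in Intel's manuals).
 */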
1721 #if 0 /* Not currently used */
1722 static inline uint32_t
1723 wm_io_read(struct wm_softc *sc, int reg)
1724 {
1725 
1726 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1727 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1728 }
1729 #endif
1730 
1731 static inline void
1732 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1733 {
1734 
1735 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1736 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1737 }
1738 
1739 static inline void
1740 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1741     uint32_t data)
1742 {
1743 	uint32_t regval;
1744 	int i;
1745 
1746 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1747 
1748 	CSR_WRITE(sc, reg, regval);
1749 
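	/* Poll until the controller indicates it has latched the write. */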
1750 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1751 		delay(5);
1752 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1753 			break;
1754 	}
1755 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1756 		aprint_error("%s: WARNING:"
1757 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1758 		    device_xname(sc->sc_dev), reg);
1759 	}
1760 }
1761 
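/*
 * Store a 64-bit bus address into the low/high 32-bit words of a
 * descriptor address field, in little-endian byte order.
 */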
1762 static inline void
1763 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1764 {
1765 	wa->wa_low = htole32(BUS_ADDR_LO32(v));
1766 	wa->wa_high = htole32(BUS_ADDR_HI32(v));
1767 }
1768 
1769 /*
1770  * Descriptor sync/init functions.
1771  */
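/*
 * Sync "num" Tx descriptors starting at "start"; when the range wraps
 * past the end of the ring, the sync is done in two pieces.
 */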
1772 static inline void
1773 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1774 {
1775 	struct wm_softc *sc = txq->txq_sc;
1776 
1777 	/* If it will wrap around, sync to the end of the ring. */
1778 	if ((start + num) > WM_NTXDESC(txq)) {
1779 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1780 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1781 		    (WM_NTXDESC(txq) - start), ops);
1782 		num -= (WM_NTXDESC(txq) - start);
1783 		start = 0;
1784 	}
1785 
1786 	/* Now sync whatever is left. */
1787 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1788 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1789 }
1790 
1791 static inline void
1792 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1793 {
1794 	struct wm_softc *sc = rxq->rxq_sc;
1795 
1796 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1797 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1798 }
1799 
1800 static inline void
1801 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1802 {
1803 	struct wm_softc *sc = rxq->rxq_sc;
1804 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1805 	struct mbuf *m = rxs->rxs_mbuf;
1806 
1807 	/*
1808 	 * Note: We scoot the packet forward 2 bytes in the buffer
1809 	 * so that the payload after the Ethernet header is aligned
1810 	 * to a 4-byte boundary.
1811 	 *
1812 	 * XXX BRAINDAMAGE ALERT!
1813 	 * The stupid chip uses the same size for every buffer, which
1814 	 * is set in the Receive Control register.  We are using the 2K
1815 	 * size option, but what we REALLY want is (2K - 2)!  For this
1816 	 * reason, we can't "scoot" packets longer than the standard
1817 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1818 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1819 	 * the upper layer copy the headers.
1820 	 */
1821 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1822 
1823 	if (sc->sc_type == WM_T_82574) {
1824 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1825 		rxd->erx_data.erxd_addr =
1826 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
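		/* Clear the DD (descriptor done) bit so the slot can be reused. */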
1827 		rxd->erx_data.erxd_dd = 0;
1828 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1829 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1830 
1831 		rxd->nqrx_data.nrxd_paddr =
1832 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1833 		/* Currently, split header is not supported. */
1834 		rxd->nqrx_data.nrxd_haddr = 0;
1835 	} else {
1836 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1837 
1838 		wm_set_dma_addr(&rxd->wrx_addr,
1839 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1840 		rxd->wrx_len = 0;
1841 		rxd->wrx_cksum = 0;
1842 		rxd->wrx_status = 0;
1843 		rxd->wrx_errors = 0;
1844 		rxd->wrx_special = 0;
1845 	}
1846 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1847 
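	/* Advance the Rx descriptor tail to hand this slot back to the chip. */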
1848 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1849 }
1850 
1851 /*
1852  * Device driver interface functions and commonly used functions.
1853  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1854  */
1855 
1856 /* Look up a device in the supported-device table */
1857 static const struct wm_product *
1858 wm_lookup(const struct pci_attach_args *pa)
1859 {
1860 	const struct wm_product *wmp;
1861 
1862 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1863 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1864 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1865 			return wmp;
1866 	}
1867 	return NULL;
1868 }
1869 
1870 /* The match function (ca_match) */
1871 static int
1872 wm_match(device_t parent, cfdata_t cf, void *aux)
1873 {
1874 	struct pci_attach_args *pa = aux;
1875 
1876 	if (wm_lookup(pa) != NULL)
1877 		return 1;
1878 
1879 	return 0;
1880 }
1881 
1882 /* The attach function (ca_attach) */
1883 static void
1884 wm_attach(device_t parent, device_t self, void *aux)
1885 {
1886 	struct wm_softc *sc = device_private(self);
1887 	struct pci_attach_args *pa = aux;
1888 	prop_dictionary_t dict;
1889 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1890 	pci_chipset_tag_t pc = pa->pa_pc;
1891 	int counts[PCI_INTR_TYPE_SIZE];
1892 	pci_intr_type_t max_type;
1893 	const char *eetype, *xname;
1894 	bus_space_tag_t memt;
1895 	bus_space_handle_t memh;
1896 	bus_size_t memsize;
1897 	int memh_valid;
1898 	int i, error;
1899 	const struct wm_product *wmp;
1900 	prop_data_t ea;
1901 	prop_number_t pn;
1902 	uint8_t enaddr[ETHER_ADDR_LEN];
1903 	char buf[256];
1904 	char wqname[MAXCOMLEN];
1905 	uint16_t cfg1, cfg2, swdpin, nvmword;
1906 	pcireg_t preg, memtype;
1907 	uint16_t eeprom_data, apme_mask;
1908 	bool force_clear_smbi;
1909 	uint32_t link_mode;
1910 	uint32_t reg;
1911 
1912 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1913 	sc->sc_debug = WM_DEBUG_DEFAULT;
1914 #endif
1915 	sc->sc_dev = self;
1916 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
1917 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1918 	sc->sc_core_stopping = false;
1919 
1920 	wmp = wm_lookup(pa);
1921 #ifdef DIAGNOSTIC
1922 	if (wmp == NULL) {
1923 		printf("\n");
1924 		panic("wm_attach: impossible");
1925 	}
1926 #endif
1927 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1928 
1929 	sc->sc_pc = pa->pa_pc;
1930 	sc->sc_pcitag = pa->pa_tag;
1931 
1932 	if (pci_dma64_available(pa)) {
1933 		aprint_verbose(", 64-bit DMA");
1934 		sc->sc_dmat = pa->pa_dmat64;
1935 	} else {
1936 		aprint_verbose(", 32-bit DMA");
1937 		sc->sc_dmat = pa->pa_dmat;
1938 	}
1939 
1940 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1941 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1942 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1943 
1944 	sc->sc_type = wmp->wmp_type;
1945 
1946 	/* Set default function pointers */
1947 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1948 	sc->phy.release = sc->nvm.release = wm_put_null;
1949 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1950 
1951 	if (sc->sc_type < WM_T_82543) {
1952 		if (sc->sc_rev < 2) {
1953 			aprint_error_dev(sc->sc_dev,
1954 			    "i82542 must be at least rev. 2\n");
1955 			return;
1956 		}
1957 		if (sc->sc_rev < 3)
1958 			sc->sc_type = WM_T_82542_2_0;
1959 	}
1960 
1961 	/*
1962 	 * Disable MSI for Errata:
1963 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1964 	 *
1965 	 *  82544: Errata 25
1966 	 *  82540: Errata  6 (easy to reproduce device timeout)
1967 	 *  82545: Errata  4 (easy to reproduce device timeout)
1968 	 *  82546: Errata 26 (easy to reproduce device timeout)
1969 	 *  82541: Errata  7 (easy to reproduce device timeout)
1970 	 *
1971 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1972 	 *
1973 	 *  82571 & 82572: Errata 63
1974 	 */
1975 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1976 	    || (sc->sc_type == WM_T_82572))
1977 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1978 
1979 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1980 	    || (sc->sc_type == WM_T_82580)
1981 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1982 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1983 		sc->sc_flags |= WM_F_NEWQUEUE;
1984 
1985 	/* Set device properties (mactype) */
1986 	dict = device_properties(sc->sc_dev);
1987 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1988 
1989 	/*
1990 	 * Map the device.  All devices support memory-mapped access,
1991 	 * and it is really required for normal operation.
1992 	 */
1993 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1994 	switch (memtype) {
1995 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1996 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1997 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1998 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1999 		break;
2000 	default:
2001 		memh_valid = 0;
2002 		break;
2003 	}
2004 
2005 	if (memh_valid) {
2006 		sc->sc_st = memt;
2007 		sc->sc_sh = memh;
2008 		sc->sc_ss = memsize;
2009 	} else {
2010 		aprint_error_dev(sc->sc_dev,
2011 		    "unable to map device registers\n");
2012 		return;
2013 	}
2014 
2015 	/*
2016 	 * In addition, i82544 and later support I/O mapped indirect
2017 	 * register access.  It is not desirable (nor supported in
2018 	 * this driver) to use it for normal operation, though it is
2019 	 * required to work around bugs in some chip versions.
2020 	 */
2021 	switch (sc->sc_type) {
2022 	case WM_T_82544:
2023 	case WM_T_82541:
2024 	case WM_T_82541_2:
2025 	case WM_T_82547:
2026 	case WM_T_82547_2:
2027 		/* First we have to find the I/O BAR. */
2028 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
2029 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
2030 			if (memtype == PCI_MAPREG_TYPE_IO)
2031 				break;
2032 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
2033 			    PCI_MAPREG_MEM_TYPE_64BIT)
2034 				i += 4;	/* skip high bits, too */
2035 		}
2036 		if (i < PCI_MAPREG_END) {
2037 			/*
2038 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
2039 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
2040 			 * that's no problem because newer chips don't have
2041 			 * this bug.
2042 			 *
2043 			 * The i8254x apparently doesn't respond when the
2044 			 * I/O BAR is 0, which looks as if it has not been
2045 			 * configured.
2046 			 */
2047 			preg = pci_conf_read(pc, pa->pa_tag, i);
2048 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2049 				aprint_error_dev(sc->sc_dev,
2050 				    "WARNING: I/O BAR at zero.\n");
2051 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2052 					0, &sc->sc_iot, &sc->sc_ioh,
2053 					NULL, &sc->sc_ios) == 0) {
2054 				sc->sc_flags |= WM_F_IOH_VALID;
2055 			} else
2056 				aprint_error_dev(sc->sc_dev,
2057 				    "WARNING: unable to map I/O space\n");
2058 		}
2059 		break;
2060 	default:
2061 		break;
2062 	}
2063 
2064 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
2065 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2066 	preg |= PCI_COMMAND_MASTER_ENABLE;
2067 	if (sc->sc_type < WM_T_82542_2_1)
2068 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2069 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2070 
2071 	/* Power up chip */
2072 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2073 	    && error != EOPNOTSUPP) {
2074 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2075 		return;
2076 	}
2077 
2078 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2079 	/*
2080 	 * To save interrupt resources, don't use MSI-X when only one
2081 	 * queue can be used.
2082 	 */
2083 	if (sc->sc_nqueues > 1) {
2084 		max_type = PCI_INTR_TYPE_MSIX;
2085 		/*
2086 		 * The 82583 has an MSI-X capability in its PCI configuration
2087 		 * space, but the device doesn't actually support it. At least
2088 		 * the documentation doesn't say anything about MSI-X.
2089 		 */
2090 		counts[PCI_INTR_TYPE_MSIX]
2091 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2092 	} else {
2093 		max_type = PCI_INTR_TYPE_MSI;
2094 		counts[PCI_INTR_TYPE_MSIX] = 0;
2095 	}
2096 
2097 	/* Allocation settings */
2098 	counts[PCI_INTR_TYPE_MSI] = 1;
2099 	counts[PCI_INTR_TYPE_INTX] = 1;
2100 	/* overridden by disable flags */
2101 	if (wm_disable_msi != 0) {
2102 		counts[PCI_INTR_TYPE_MSI] = 0;
2103 		if (wm_disable_msix != 0) {
2104 			max_type = PCI_INTR_TYPE_INTX;
2105 			counts[PCI_INTR_TYPE_MSIX] = 0;
2106 		}
2107 	} else if (wm_disable_msix != 0) {
2108 		max_type = PCI_INTR_TYPE_MSI;
2109 		counts[PCI_INTR_TYPE_MSIX] = 0;
2110 	}
2111 
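	/*
	 * Try the interrupt types from max_type downward: if MSI-X setup
	 * fails, retry with MSI; if MSI setup fails, fall back to INTx.
	 */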
2112 alloc_retry:
2113 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2114 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2115 		return;
2116 	}
2117 
2118 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2119 		error = wm_setup_msix(sc);
2120 		if (error) {
2121 			pci_intr_release(pc, sc->sc_intrs,
2122 			    counts[PCI_INTR_TYPE_MSIX]);
2123 
2124 			/* Setup for MSI: Disable MSI-X */
2125 			max_type = PCI_INTR_TYPE_MSI;
2126 			counts[PCI_INTR_TYPE_MSI] = 1;
2127 			counts[PCI_INTR_TYPE_INTX] = 1;
2128 			goto alloc_retry;
2129 		}
2130 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2131 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2132 		error = wm_setup_legacy(sc);
2133 		if (error) {
2134 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2135 			    counts[PCI_INTR_TYPE_MSI]);
2136 
2137 			/* The next try is for INTx: Disable MSI */
2138 			max_type = PCI_INTR_TYPE_INTX;
2139 			counts[PCI_INTR_TYPE_INTX] = 1;
2140 			goto alloc_retry;
2141 		}
2142 	} else {
2143 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2144 		error = wm_setup_legacy(sc);
2145 		if (error) {
2146 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2147 			    counts[PCI_INTR_TYPE_INTX]);
2148 			return;
2149 		}
2150 	}
2151 
2152 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2153 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2154 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2155 	    WM_WORKQUEUE_FLAGS);
2156 	if (error) {
2157 		aprint_error_dev(sc->sc_dev,
2158 		    "unable to create workqueue\n");
2159 		goto out;
2160 	}
2161 
2162 	/*
2163 	 * Check the function ID (unit number of the chip).
2164 	 */
2165 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2166 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2167 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2168 	    || (sc->sc_type == WM_T_82580)
2169 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2170 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2171 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2172 	else
2173 		sc->sc_funcid = 0;
2174 
2175 	/*
2176 	 * Determine a few things about the bus we're connected to.
2177 	 */
2178 	if (sc->sc_type < WM_T_82543) {
2179 		/* We don't really know the bus characteristics here. */
2180 		sc->sc_bus_speed = 33;
2181 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2182 		/*
2183 		 * CSA (Communication Streaming Architecture) is about as fast
2184 		 * as a 32-bit 66MHz PCI bus.
2185 		 */
2186 		sc->sc_flags |= WM_F_CSA;
2187 		sc->sc_bus_speed = 66;
2188 		aprint_verbose_dev(sc->sc_dev,
2189 		    "Communication Streaming Architecture\n");
2190 		if (sc->sc_type == WM_T_82547) {
2191 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2192 			callout_setfunc(&sc->sc_txfifo_ch,
2193 			    wm_82547_txfifo_stall, sc);
2194 			aprint_verbose_dev(sc->sc_dev,
2195 			    "using 82547 Tx FIFO stall work-around\n");
2196 		}
2197 	} else if (sc->sc_type >= WM_T_82571) {
2198 		sc->sc_flags |= WM_F_PCIE;
2199 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2200 		    && (sc->sc_type != WM_T_ICH10)
2201 		    && (sc->sc_type != WM_T_PCH)
2202 		    && (sc->sc_type != WM_T_PCH2)
2203 		    && (sc->sc_type != WM_T_PCH_LPT)
2204 		    && (sc->sc_type != WM_T_PCH_SPT)
2205 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2206 			/* ICH* and PCH* have no PCIe capability registers */
2207 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2208 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2209 				NULL) == 0)
2210 				aprint_error_dev(sc->sc_dev,
2211 				    "unable to find PCIe capability\n");
2212 		}
2213 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2214 	} else {
2215 		reg = CSR_READ(sc, WMREG_STATUS);
2216 		if (reg & STATUS_BUS64)
2217 			sc->sc_flags |= WM_F_BUS64;
2218 		if ((reg & STATUS_PCIX_MODE) != 0) {
2219 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2220 
2221 			sc->sc_flags |= WM_F_PCIX;
2222 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2223 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2224 				aprint_error_dev(sc->sc_dev,
2225 				    "unable to find PCIX capability\n");
2226 			else if (sc->sc_type != WM_T_82545_3 &&
2227 				 sc->sc_type != WM_T_82546_3) {
2228 				/*
2229 				 * Work around a problem caused by the BIOS
2230 				 * setting the max memory read byte count
2231 				 * incorrectly.
2232 				 */
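				/*
				 * Read the configured byte count and the
				 * advertised maximum, then clamp the former
				 * to the latter.
				 */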
2233 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2234 				    sc->sc_pcixe_capoff + PCIX_CMD);
2235 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2236 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2237 
2238 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2239 				    PCIX_CMD_BYTECNT_SHIFT;
2240 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2241 				    PCIX_STATUS_MAXB_SHIFT;
2242 				if (bytecnt > maxb) {
2243 					aprint_verbose_dev(sc->sc_dev,
2244 					    "resetting PCI-X MMRBC: %d -> %d\n",
2245 					    512 << bytecnt, 512 << maxb);
2246 					pcix_cmd = (pcix_cmd &
2247 					    ~PCIX_CMD_BYTECNT_MASK) |
2248 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2249 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2250 					    sc->sc_pcixe_capoff + PCIX_CMD,
2251 					    pcix_cmd);
2252 				}
2253 			}
2254 		}
2255 		/*
2256 		 * The quad port adapter is special; it has a PCIX-PCIX
2257 		 * bridge on the board, and can run the secondary bus at
2258 		 * a higher speed.
2259 		 */
2260 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2261 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2262 								      : 66;
2263 		} else if (sc->sc_flags & WM_F_PCIX) {
2264 			switch (reg & STATUS_PCIXSPD_MASK) {
2265 			case STATUS_PCIXSPD_50_66:
2266 				sc->sc_bus_speed = 66;
2267 				break;
2268 			case STATUS_PCIXSPD_66_100:
2269 				sc->sc_bus_speed = 100;
2270 				break;
2271 			case STATUS_PCIXSPD_100_133:
2272 				sc->sc_bus_speed = 133;
2273 				break;
2274 			default:
2275 				aprint_error_dev(sc->sc_dev,
2276 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2277 				    reg & STATUS_PCIXSPD_MASK);
2278 				sc->sc_bus_speed = 66;
2279 				break;
2280 			}
2281 		} else
2282 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2283 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2284 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2285 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2286 	}
2287 
2288 	/* clear interesting stat counters */
2289 	CSR_READ(sc, WMREG_COLC);
2290 	CSR_READ(sc, WMREG_RXERRC);
2291 
2292 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2293 	    || (sc->sc_type >= WM_T_ICH8))
2294 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2295 	if (sc->sc_type >= WM_T_ICH8)
2296 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2297 
2298 	/* Set up NVM access functions and PHY/NVM semaphores */
2299 	switch (sc->sc_type) {
2300 	case WM_T_82542_2_0:
2301 	case WM_T_82542_2_1:
2302 	case WM_T_82543:
2303 	case WM_T_82544:
2304 		/* Microwire */
2305 		sc->nvm.read = wm_nvm_read_uwire;
2306 		sc->sc_nvm_wordsize = 64;
2307 		sc->sc_nvm_addrbits = 6;
2308 		break;
2309 	case WM_T_82540:
2310 	case WM_T_82545:
2311 	case WM_T_82545_3:
2312 	case WM_T_82546:
2313 	case WM_T_82546_3:
2314 		/* Microwire */
2315 		sc->nvm.read = wm_nvm_read_uwire;
2316 		reg = CSR_READ(sc, WMREG_EECD);
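		/*
		 * The EE_SIZE strap selects a 256-word part (8 address
		 * bits) instead of a 64-word part (6 address bits).
		 */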
2317 		if (reg & EECD_EE_SIZE) {
2318 			sc->sc_nvm_wordsize = 256;
2319 			sc->sc_nvm_addrbits = 8;
2320 		} else {
2321 			sc->sc_nvm_wordsize = 64;
2322 			sc->sc_nvm_addrbits = 6;
2323 		}
2324 		sc->sc_flags |= WM_F_LOCK_EECD;
2325 		sc->nvm.acquire = wm_get_eecd;
2326 		sc->nvm.release = wm_put_eecd;
2327 		break;
2328 	case WM_T_82541:
2329 	case WM_T_82541_2:
2330 	case WM_T_82547:
2331 	case WM_T_82547_2:
2332 		reg = CSR_READ(sc, WMREG_EECD);
2333 		/*
2334 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
2335 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
2336 		 * 8254[17], so set the flags and functions before calling it.
2337 		sc->sc_flags |= WM_F_LOCK_EECD;
2338 		sc->nvm.acquire = wm_get_eecd;
2339 		sc->nvm.release = wm_put_eecd;
2340 		if (reg & EECD_EE_TYPE) {
2341 			/* SPI */
2342 			sc->nvm.read = wm_nvm_read_spi;
2343 			sc->sc_flags |= WM_F_EEPROM_SPI;
2344 			wm_nvm_set_addrbits_size_eecd(sc);
2345 		} else {
2346 			/* Microwire */
2347 			sc->nvm.read = wm_nvm_read_uwire;
2348 			if ((reg & EECD_EE_ABITS) != 0) {
2349 				sc->sc_nvm_wordsize = 256;
2350 				sc->sc_nvm_addrbits = 8;
2351 			} else {
2352 				sc->sc_nvm_wordsize = 64;
2353 				sc->sc_nvm_addrbits = 6;
2354 			}
2355 		}
2356 		break;
2357 	case WM_T_82571:
2358 	case WM_T_82572:
2359 		/* SPI */
2360 		sc->nvm.read = wm_nvm_read_eerd;
2361 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2362 		sc->sc_flags |= WM_F_EEPROM_SPI;
2363 		wm_nvm_set_addrbits_size_eecd(sc);
2364 		sc->phy.acquire = wm_get_swsm_semaphore;
2365 		sc->phy.release = wm_put_swsm_semaphore;
2366 		sc->nvm.acquire = wm_get_nvm_82571;
2367 		sc->nvm.release = wm_put_nvm_82571;
2368 		break;
2369 	case WM_T_82573:
2370 	case WM_T_82574:
2371 	case WM_T_82583:
2372 		sc->nvm.read = wm_nvm_read_eerd;
2373 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2374 		if (sc->sc_type == WM_T_82573) {
2375 			sc->phy.acquire = wm_get_swsm_semaphore;
2376 			sc->phy.release = wm_put_swsm_semaphore;
2377 			sc->nvm.acquire = wm_get_nvm_82571;
2378 			sc->nvm.release = wm_put_nvm_82571;
2379 		} else {
2380 			/* Both PHY and NVM use the same semaphore. */
2381 			sc->phy.acquire = sc->nvm.acquire
2382 			    = wm_get_swfwhw_semaphore;
2383 			sc->phy.release = sc->nvm.release
2384 			    = wm_put_swfwhw_semaphore;
2385 		}
2386 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2387 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2388 			sc->sc_nvm_wordsize = 2048;
2389 		} else {
2390 			/* SPI */
2391 			sc->sc_flags |= WM_F_EEPROM_SPI;
2392 			wm_nvm_set_addrbits_size_eecd(sc);
2393 		}
2394 		break;
2395 	case WM_T_82575:
2396 	case WM_T_82576:
2397 	case WM_T_82580:
2398 	case WM_T_I350:
2399 	case WM_T_I354:
2400 	case WM_T_80003:
2401 		/* SPI */
2402 		sc->sc_flags |= WM_F_EEPROM_SPI;
2403 		wm_nvm_set_addrbits_size_eecd(sc);
2404 		if ((sc->sc_type == WM_T_80003)
2405 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2406 			sc->nvm.read = wm_nvm_read_eerd;
2407 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2408 		} else {
2409 			sc->nvm.read = wm_nvm_read_spi;
2410 			sc->sc_flags |= WM_F_LOCK_EECD;
2411 		}
2412 		sc->phy.acquire = wm_get_phy_82575;
2413 		sc->phy.release = wm_put_phy_82575;
2414 		sc->nvm.acquire = wm_get_nvm_80003;
2415 		sc->nvm.release = wm_put_nvm_80003;
2416 		break;
2417 	case WM_T_ICH8:
2418 	case WM_T_ICH9:
2419 	case WM_T_ICH10:
2420 	case WM_T_PCH:
2421 	case WM_T_PCH2:
2422 	case WM_T_PCH_LPT:
2423 		sc->nvm.read = wm_nvm_read_ich8;
2424 		/* FLASH */
2425 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2426 		sc->sc_nvm_wordsize = 2048;
2427 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2428 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2429 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2430 			aprint_error_dev(sc->sc_dev,
2431 			    "can't map FLASH registers\n");
2432 			goto out;
2433 		}
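		/*
		 * GFPREG encodes the NVM region's first and last flash
		 * sectors; the bank size computed below ends up in 16-bit
		 * words, with the region split across two banks.
		 */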
2434 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2435 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2436 		    ICH_FLASH_SECTOR_SIZE;
2437 		sc->sc_ich8_flash_bank_size =
2438 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2439 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2440 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2441 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2442 		sc->sc_flashreg_offset = 0;
2443 		sc->phy.acquire = wm_get_swflag_ich8lan;
2444 		sc->phy.release = wm_put_swflag_ich8lan;
2445 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2446 		sc->nvm.release = wm_put_nvm_ich8lan;
2447 		break;
2448 	case WM_T_PCH_SPT:
2449 	case WM_T_PCH_CNP:
2450 		sc->nvm.read = wm_nvm_read_spt;
2451 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2452 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2453 		sc->sc_flasht = sc->sc_st;
2454 		sc->sc_flashh = sc->sc_sh;
2455 		sc->sc_ich8_flash_base = 0;
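		/* The NVM size is derived from the soft strap register. */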
2456 		sc->sc_nvm_wordsize =
2457 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2458 		    * NVM_SIZE_MULTIPLIER;
2459 		/* That is the size in bytes; we want words */
2460 		sc->sc_nvm_wordsize /= 2;
2461 		/* Assume 2 banks */
2462 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2463 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2464 		sc->phy.acquire = wm_get_swflag_ich8lan;
2465 		sc->phy.release = wm_put_swflag_ich8lan;
2466 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2467 		sc->nvm.release = wm_put_nvm_ich8lan;
2468 		break;
2469 	case WM_T_I210:
2470 	case WM_T_I211:
2471 		/* Allow one clear of the SW semaphore on I210 and newer */
2472 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2473 		if (wm_nvm_flash_presence_i210(sc)) {
2474 			sc->nvm.read = wm_nvm_read_eerd;
2475 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2476 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2477 			wm_nvm_set_addrbits_size_eecd(sc);
2478 		} else {
2479 			sc->nvm.read = wm_nvm_read_invm;
2480 			sc->sc_flags |= WM_F_EEPROM_INVM;
2481 			sc->sc_nvm_wordsize = INVM_SIZE;
2482 		}
2483 		sc->phy.acquire = wm_get_phy_82575;
2484 		sc->phy.release = wm_put_phy_82575;
2485 		sc->nvm.acquire = wm_get_nvm_80003;
2486 		sc->nvm.release = wm_put_nvm_80003;
2487 		break;
2488 	default:
2489 		break;
2490 	}
2491 
2492 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2493 	switch (sc->sc_type) {
2494 	case WM_T_82571:
2495 	case WM_T_82572:
2496 		reg = CSR_READ(sc, WMREG_SWSM2);
2497 		if ((reg & SWSM2_LOCK) == 0) {
2498 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2499 			force_clear_smbi = true;
2500 		} else
2501 			force_clear_smbi = false;
2502 		break;
2503 	case WM_T_82573:
2504 	case WM_T_82574:
2505 	case WM_T_82583:
2506 		force_clear_smbi = true;
2507 		break;
2508 	default:
2509 		force_clear_smbi = false;
2510 		break;
2511 	}
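	/*
	 * If firmware (e.g. an old boot agent) left SMBI set, clear it
	 * so the first NVM/PHY access doesn't stall on the semaphore.
	 */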
2512 	if (force_clear_smbi) {
2513 		reg = CSR_READ(sc, WMREG_SWSM);
2514 		if ((reg & SWSM_SMBI) != 0)
2515 			aprint_error_dev(sc->sc_dev,
2516 			    "Please update the Bootagent\n");
2517 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2518 	}
2519 
2520 	/*
2521 	 * Defer printing the EEPROM type until after verifying the checksum.
2522 	 * This allows the EEPROM type to be printed correctly in the case
2523 	 * that no EEPROM is attached.
2524 	 */
2525 	/*
2526 	 * Validate the EEPROM checksum. If the checksum fails, flag
2527 	 * this for later, so we can fail future reads from the EEPROM.
2528 	 */
2529 	if (wm_nvm_validate_checksum(sc)) {
2530 		/*
2531 		 * Validate again, because some PCI-e parts fail the
2532 		 * first check due to the link being in a sleep state.
2533 		 */
2534 		if (wm_nvm_validate_checksum(sc))
2535 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2536 	}
2537 
2538 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2539 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2540 	else {
2541 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2542 		    sc->sc_nvm_wordsize);
2543 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2544 			aprint_verbose("iNVM");
2545 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2546 			aprint_verbose("FLASH(HW)");
2547 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2548 			aprint_verbose("FLASH");
2549 		else {
2550 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2551 				eetype = "SPI";
2552 			else
2553 				eetype = "MicroWire";
2554 			aprint_verbose("(%d address bits) %s EEPROM",
2555 			    sc->sc_nvm_addrbits, eetype);
2556 		}
2557 	}
2558 	wm_nvm_version(sc);
2559 	aprint_verbose("\n");
2560 
2561 	/*
2562 	 * XXX The first call to wm_gmii_setup_phytype(). The result might be
2563 	 * incorrect.
2564 	 */
2565 	wm_gmii_setup_phytype(sc, 0, 0);
2566 
2567 	/* Check for WM_F_WOL on some chips before wm_reset() */
2568 	switch (sc->sc_type) {
2569 	case WM_T_ICH8:
2570 	case WM_T_ICH9:
2571 	case WM_T_ICH10:
2572 	case WM_T_PCH:
2573 	case WM_T_PCH2:
2574 	case WM_T_PCH_LPT:
2575 	case WM_T_PCH_SPT:
2576 	case WM_T_PCH_CNP:
2577 		apme_mask = WUC_APME;
2578 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2579 		if ((eeprom_data & apme_mask) != 0)
2580 			sc->sc_flags |= WM_F_WOL;
2581 		break;
2582 	default:
2583 		break;
2584 	}
2585 
2586 	/* Reset the chip to a known state. */
2587 	wm_reset(sc);
2588 
2589 	/*
2590 	 * Check for I21[01] PLL workaround.
2591 	 *
2592 	 * Three cases:
2593 	 * a) Chip is I211.
2594 	 * b) Chip is I210 and it uses INVM (not FLASH).
2595 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2596 	 */
2597 	if (sc->sc_type == WM_T_I211)
2598 		sc->sc_flags |= WM_F_PLL_WA_I210;
2599 	if (sc->sc_type == WM_T_I210) {
2600 		if (!wm_nvm_flash_presence_i210(sc))
2601 			sc->sc_flags |= WM_F_PLL_WA_I210;
2602 		else if ((sc->sc_nvm_ver_major < 3)
2603 		    || ((sc->sc_nvm_ver_major == 3)
2604 			&& (sc->sc_nvm_ver_minor < 25))) {
2605 			aprint_verbose_dev(sc->sc_dev,
2606 			    "ROM image version %d.%d is older than 3.25\n",
2607 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2608 			sc->sc_flags |= WM_F_PLL_WA_I210;
2609 		}
2610 	}
2611 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2612 		wm_pll_workaround_i210(sc);
2613 
2614 	wm_get_wakeup(sc);
2615 
2616 	/* Non-AMT based hardware can now take control from firmware */
2617 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2618 		wm_get_hw_control(sc);
2619 
2620 	/*
2621 	 * Read the Ethernet address from the EEPROM, if not first found
2622 	 * in device properties.
2623 	 */
2624 	ea = prop_dictionary_get(dict, "mac-address");
2625 	if (ea != NULL) {
2626 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2627 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2628 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2629 	} else {
2630 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2631 			aprint_error_dev(sc->sc_dev,
2632 			    "unable to read Ethernet address\n");
2633 			goto out;
2634 		}
2635 	}
2636 
2637 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2638 	    ether_sprintf(enaddr));
2639 
2640 	/*
2641 	 * Read the config info from the EEPROM, and set up various
2642 	 * bits in the control registers based on their contents.
2643 	 */
2644 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2645 	if (pn != NULL) {
2646 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2647 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2648 	} else {
2649 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2650 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2651 			goto out;
2652 		}
2653 	}
2654 
2655 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2656 	if (pn != NULL) {
2657 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2658 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2659 	} else {
2660 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2661 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2662 			goto out;
2663 		}
2664 	}
2665 
2666 	/* check for WM_F_WOL */
2667 	switch (sc->sc_type) {
2668 	case WM_T_82542_2_0:
2669 	case WM_T_82542_2_1:
2670 	case WM_T_82543:
2671 		/* dummy? */
2672 		eeprom_data = 0;
2673 		apme_mask = NVM_CFG3_APME;
2674 		break;
2675 	case WM_T_82544:
2676 		apme_mask = NVM_CFG2_82544_APM_EN;
2677 		eeprom_data = cfg2;
2678 		break;
2679 	case WM_T_82546:
2680 	case WM_T_82546_3:
2681 	case WM_T_82571:
2682 	case WM_T_82572:
2683 	case WM_T_82573:
2684 	case WM_T_82574:
2685 	case WM_T_82583:
2686 	case WM_T_80003:
2687 	case WM_T_82575:
2688 	case WM_T_82576:
2689 		apme_mask = NVM_CFG3_APME;
2690 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2691 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2692 		break;
2693 	case WM_T_82580:
2694 	case WM_T_I350:
2695 	case WM_T_I354:
2696 	case WM_T_I210:
2697 	case WM_T_I211:
2698 		apme_mask = NVM_CFG3_APME;
2699 		wm_nvm_read(sc,
2700 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2701 		    1, &eeprom_data);
2702 		break;
2703 	case WM_T_ICH8:
2704 	case WM_T_ICH9:
2705 	case WM_T_ICH10:
2706 	case WM_T_PCH:
2707 	case WM_T_PCH2:
2708 	case WM_T_PCH_LPT:
2709 	case WM_T_PCH_SPT:
2710 	case WM_T_PCH_CNP:
2711 		/* Already checked before wm_reset() */
2712 		apme_mask = eeprom_data = 0;
2713 		break;
2714 	default: /* XXX 82540 */
2715 		apme_mask = NVM_CFG3_APME;
2716 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2717 		break;
2718 	}
2719 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2720 	if ((eeprom_data & apme_mask) != 0)
2721 		sc->sc_flags |= WM_F_WOL;
2722 
2723 	/*
2724 	 * We have the EEPROM settings; now apply the special cases
2725 	 * where the EEPROM may be wrong or the board won't support
2726 	 * wake-on-LAN on a particular port.
2727 	 */
2728 	switch (sc->sc_pcidevid) {
2729 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2730 		sc->sc_flags &= ~WM_F_WOL;
2731 		break;
2732 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2733 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2734 		/* Wake events only supported on port A for dual fiber
2735 		 * regardless of eeprom setting */
2736 		if (sc->sc_funcid == 1)
2737 			sc->sc_flags &= ~WM_F_WOL;
2738 		break;
2739 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2740 		/* If quad port adapter, disable WoL on all but port A */
2741 		if (sc->sc_funcid != 0)
2742 			sc->sc_flags &= ~WM_F_WOL;
2743 		break;
2744 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2745 		/* Wake events only supported on port A for dual fiber
2746 		 * regardless of eeprom setting */
2747 		if (sc->sc_funcid == 1)
2748 			sc->sc_flags &= ~WM_F_WOL;
2749 		break;
2750 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2751 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2752 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2753 		/* If quad port adapter, disable WoL on all but port A */
2754 		if (sc->sc_funcid != 0)
2755 			sc->sc_flags &= ~WM_F_WOL;
2756 		break;
2757 	}
2758 
2759 	if (sc->sc_type >= WM_T_82575) {
2760 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2761 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2762 			    nvmword);
2763 			if ((sc->sc_type == WM_T_82575) ||
2764 			    (sc->sc_type == WM_T_82576)) {
2765 				/* Check NVM for autonegotiation */
2766 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2767 				    != 0)
2768 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2769 			}
2770 			if ((sc->sc_type == WM_T_82575) ||
2771 			    (sc->sc_type == WM_T_I350)) {
2772 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2773 					sc->sc_flags |= WM_F_MAS;
2774 			}
2775 		}
2776 	}
2777 
2778 	/*
2779 	 * XXX need special handling for some multiple-port cards
2780 	 * to disable a particular port.
2781 	 */
2782 
2783 	if (sc->sc_type >= WM_T_82544) {
2784 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2785 		if (pn != NULL) {
2786 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2787 			swdpin = (uint16_t) prop_number_signed_value(pn);
2788 		} else {
2789 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2790 				aprint_error_dev(sc->sc_dev,
2791 				    "unable to read SWDPIN\n");
2792 				goto out;
2793 			}
2794 		}
2795 	}
2796 
2797 	if (cfg1 & NVM_CFG1_ILOS)
2798 		sc->sc_ctrl |= CTRL_ILOS;
2799 
2800 	/*
2801 	 * XXX
2802 	 * This code isn't correct because pins 2 and 3 are located in
2803 	 * different positions on newer chips. Check all datasheets.
2804 	 *
2805 	 * Until this problem is resolved, only do this on chips <= 82580.
2806 	 */
2807 	if (sc->sc_type <= WM_T_82580) {
2808 		if (sc->sc_type >= WM_T_82544) {
2809 			sc->sc_ctrl |=
2810 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2811 			    CTRL_SWDPIO_SHIFT;
2812 			sc->sc_ctrl |=
2813 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2814 			    CTRL_SWDPINS_SHIFT;
2815 		} else {
2816 			sc->sc_ctrl |=
2817 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2818 			    CTRL_SWDPIO_SHIFT;
2819 		}
2820 	}
2821 
2822 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2823 		wm_nvm_read(sc,
2824 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2825 		    1, &nvmword);
2826 		if (nvmword & NVM_CFG3_ILOS)
2827 			sc->sc_ctrl |= CTRL_ILOS;
2828 	}
2829 
2830 #if 0
2831 	if (sc->sc_type >= WM_T_82544) {
2832 		if (cfg1 & NVM_CFG1_IPS0)
2833 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2834 		if (cfg1 & NVM_CFG1_IPS1)
2835 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2836 		sc->sc_ctrl_ext |=
2837 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2838 		    CTRL_EXT_SWDPIO_SHIFT;
2839 		sc->sc_ctrl_ext |=
2840 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2841 		    CTRL_EXT_SWDPINS_SHIFT;
2842 	} else {
2843 		sc->sc_ctrl_ext |=
2844 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2845 		    CTRL_EXT_SWDPIO_SHIFT;
2846 	}
2847 #endif
2848 
2849 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2850 #if 0
2851 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2852 #endif
2853 
2854 	if (sc->sc_type == WM_T_PCH) {
2855 		uint16_t val;
2856 
2857 		/* Save the NVM K1 bit setting */
2858 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2859 
2860 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2861 			sc->sc_nvm_k1_enabled = 1;
2862 		else
2863 			sc->sc_nvm_k1_enabled = 0;
2864 	}
2865 
2866 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2867 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2868 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2869 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2870 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2871 	    || sc->sc_type == WM_T_82573
2872 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2873 		/* Copper only */
2874 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2875 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2876 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2877 	    || (sc->sc_type == WM_T_I211)) {
2878 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2879 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2880 		switch (link_mode) {
2881 		case CTRL_EXT_LINK_MODE_1000KX:
2882 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
2883 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2884 			break;
2885 		case CTRL_EXT_LINK_MODE_SGMII:
2886 			if (wm_sgmii_uses_mdio(sc)) {
2887 				aprint_normal_dev(sc->sc_dev,
2888 				    "SGMII(MDIO)\n");
2889 				sc->sc_flags |= WM_F_SGMII;
2890 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2891 				break;
2892 			}
2893 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2894 			/*FALLTHROUGH*/
2895 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2896 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2897 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2898 				if (link_mode
2899 				    == CTRL_EXT_LINK_MODE_SGMII) {
2900 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2901 					sc->sc_flags |= WM_F_SGMII;
2902 					aprint_verbose_dev(sc->sc_dev,
2903 					    "SGMII\n");
2904 				} else {
2905 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2906 					aprint_verbose_dev(sc->sc_dev,
2907 					    "SERDES\n");
2908 				}
2909 				break;
2910 			}
2911 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2912 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2913 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2914 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2915 				sc->sc_flags |= WM_F_SGMII;
2916 			}
2917 			/* Do not change link mode for 100BaseFX */
2918 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2919 				break;
2920 
2921 			/* Change current link mode setting */
2922 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2923 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2924 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2925 			else
2926 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2927 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2928 			break;
2929 		case CTRL_EXT_LINK_MODE_GMII:
2930 		default:
2931 			aprint_normal_dev(sc->sc_dev, "Copper\n");
2932 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2933 			break;
2934 		}
2935 
2936 		/* Enable the I2C interface only when running in SGMII mode. */
2937 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2938 			reg |= CTRL_EXT_I2C_ENA;
2939 		else
2940 			reg &= ~CTRL_EXT_I2C_ENA;
2941 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2942 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
2943 			if (!wm_sgmii_uses_mdio(sc))
2944 				wm_gmii_setup_phytype(sc, 0, 0);
2945 			wm_reset_mdicnfg_82580(sc);
2946 		}
2947 	} else if (sc->sc_type < WM_T_82543 ||
2948 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2949 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2950 			aprint_error_dev(sc->sc_dev,
2951 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2952 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2953 		}
2954 	} else {
2955 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2956 			aprint_error_dev(sc->sc_dev,
2957 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2958 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2959 		}
2960 	}
2961 
2962 	if (sc->sc_type >= WM_T_PCH2)
2963 		sc->sc_flags |= WM_F_EEE;
2964 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2965 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2966 		/* XXX: Need special handling for I354. (not yet) */
2967 		if (sc->sc_type != WM_T_I354)
2968 			sc->sc_flags |= WM_F_EEE;
2969 	}
2970 
2971 	/*
2972 	 * The I350 has a bug where it always strips the CRC whether
2973 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
2974 	 */
2975 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2976 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2977 		sc->sc_flags |= WM_F_CRC_STRIP;
2978 
2979 	/* Set device properties (macflags) */
2980 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2981 
2982 	if (sc->sc_flags != 0) {
2983 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2984 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2985 	}
2986 
2987 #ifdef WM_MPSAFE
2988 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2989 #else
2990 	sc->sc_core_lock = NULL;
2991 #endif
2992 
2993 	/* Initialize the media structures accordingly. */
2994 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2995 		wm_gmii_mediainit(sc, wmp->wmp_product);
2996 	else
2997 		wm_tbi_mediainit(sc); /* All others */
2998 
2999 	ifp = &sc->sc_ethercom.ec_if;
3000 	xname = device_xname(sc->sc_dev);
3001 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
3002 	ifp->if_softc = sc;
3003 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3004 #ifdef WM_MPSAFE
3005 	ifp->if_extflags = IFEF_MPSAFE;
3006 #endif
3007 	ifp->if_ioctl = wm_ioctl;
3008 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3009 		ifp->if_start = wm_nq_start;
3010 		/*
3011 		 * When the number of CPUs is one and the controller can use
3012 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
3013 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
3014 		 * other for link status changes.
3015 		 * In this situation, wm_nq_transmit() is disadvantageous
3016 		 * because of wm_select_txqueue() and pcq(9) overhead.
3017 		 */
3018 		if (wm_is_using_multiqueue(sc))
3019 			ifp->if_transmit = wm_nq_transmit;
3020 	} else {
3021 		ifp->if_start = wm_start;
3022 		/*
3023 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
3024 		 */
3025 		if (wm_is_using_multiqueue(sc))
3026 			ifp->if_transmit = wm_transmit;
3027 	}
3028 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
3029 	ifp->if_init = wm_init;
3030 	ifp->if_stop = wm_stop;
3031 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
3032 	IFQ_SET_READY(&ifp->if_snd);
3033 
3034 	/* Check for jumbo frame */
3035 	switch (sc->sc_type) {
3036 	case WM_T_82573:
3037 		/* XXX limited to 9234 if ASPM is disabled */
3038 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
3039 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
3040 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3041 		break;
3042 	case WM_T_82571:
3043 	case WM_T_82572:
3044 	case WM_T_82574:
3045 	case WM_T_82583:
3046 	case WM_T_82575:
3047 	case WM_T_82576:
3048 	case WM_T_82580:
3049 	case WM_T_I350:
3050 	case WM_T_I354:
3051 	case WM_T_I210:
3052 	case WM_T_I211:
3053 	case WM_T_80003:
3054 	case WM_T_ICH9:
3055 	case WM_T_ICH10:
3056 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
3057 	case WM_T_PCH_LPT:
3058 	case WM_T_PCH_SPT:
3059 	case WM_T_PCH_CNP:
3060 		/* XXX limited to 9234 */
3061 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3062 		break;
3063 	case WM_T_PCH:
3064 		/* XXX limited to 4096 */
3065 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3066 		break;
3067 	case WM_T_82542_2_0:
3068 	case WM_T_82542_2_1:
3069 	case WM_T_ICH8:
3070 		/* No support for jumbo frame */
3071 		break;
3072 	default:
3073 		/* ETHER_MAX_LEN_JUMBO */
3074 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3075 		break;
3076 	}
3077 
3078 	/* If we're an i82543 or greater, we can support VLANs. */
3079 	if (sc->sc_type >= WM_T_82543) {
3080 		sc->sc_ethercom.ec_capabilities |=
3081 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3082 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3083 	}
3084 
3085 	if ((sc->sc_flags & WM_F_EEE) != 0)
3086 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3087 
3088 	/*
3089 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
3090 	 * on i82543 and later.
3091 	 */
3092 	if (sc->sc_type >= WM_T_82543) {
3093 		ifp->if_capabilities |=
3094 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3095 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3096 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3097 		    IFCAP_CSUM_TCPv6_Tx |
3098 		    IFCAP_CSUM_UDPv6_Tx;
3099 	}
3100 
3101 	/*
3102 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3103 	 *
3104 	 *	82541GI (8086:1076) ... no
3105 	 *	82572EI (8086:10b9) ... yes
3106 	 */
3107 	if (sc->sc_type >= WM_T_82571) {
3108 		ifp->if_capabilities |=
3109 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3110 	}
3111 
3112 	/*
3113 	 * If we're an i82544 or greater (except i82547), we can do
3114 	 * TCP segmentation offload.
3115 	 */
3116 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
3117 		ifp->if_capabilities |= IFCAP_TSOv4;
3118 	}
3119 
3120 	if (sc->sc_type >= WM_T_82571) {
3121 		ifp->if_capabilities |= IFCAP_TSOv6;
3122 	}
3123 
3124 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3125 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3126 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3127 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3128 
3129 	/* Attach the interface. */
3130 	if_initialize(ifp);
3131 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3132 	ether_ifattach(ifp, enaddr);
3133 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3134 	if_register(ifp);
3135 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3136 	    RND_FLAG_DEFAULT);
3137 
3138 #ifdef WM_EVENT_COUNTERS
3139 	/* Attach event counters. */
3140 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3141 	    NULL, xname, "linkintr");
3142 
3143 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3144 	    NULL, xname, "tx_xoff");
3145 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3146 	    NULL, xname, "tx_xon");
3147 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3148 	    NULL, xname, "rx_xoff");
3149 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3150 	    NULL, xname, "rx_xon");
3151 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3152 	    NULL, xname, "rx_macctl");
3153 #endif /* WM_EVENT_COUNTERS */
3154 
3155 	sc->sc_txrx_use_workqueue = false;
3156 
3157 	if (wm_phy_need_linkdown_discard(sc)) {
3158 		DPRINTF(sc, WM_DEBUG_LINK,
3159 		    ("%s: %s: Set linkdown discard flag\n",
3160 			device_xname(sc->sc_dev), __func__));
3161 		wm_set_linkdown_discard(sc);
3162 	}
3163 
3164 	wm_init_sysctls(sc);
3165 
3166 	if (pmf_device_register(self, wm_suspend, wm_resume))
3167 		pmf_class_network_register(self, ifp);
3168 	else
3169 		aprint_error_dev(self, "couldn't establish power handler\n");
3170 
3171 	sc->sc_flags |= WM_F_ATTACHED;
3172 out:
3173 	return;
3174 }
3175 
3176 /* The detach function (ca_detach) */
3177 static int
3178 wm_detach(device_t self, int flags __unused)
3179 {
3180 	struct wm_softc *sc = device_private(self);
3181 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3182 	int i;
3183 
3184 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3185 		return 0;
3186 
3187 	/* Stop the interface; callouts are stopped in wm_stop(). */
3188 	wm_stop(ifp, 1);
3189 
3190 	pmf_device_deregister(self);
3191 
3192 	sysctl_teardown(&sc->sc_sysctllog);
3193 
3194 #ifdef WM_EVENT_COUNTERS
3195 	evcnt_detach(&sc->sc_ev_linkintr);
3196 
3197 	evcnt_detach(&sc->sc_ev_tx_xoff);
3198 	evcnt_detach(&sc->sc_ev_tx_xon);
3199 	evcnt_detach(&sc->sc_ev_rx_xoff);
3200 	evcnt_detach(&sc->sc_ev_rx_xon);
3201 	evcnt_detach(&sc->sc_ev_rx_macctl);
3202 #endif /* WM_EVENT_COUNTERS */
3203 
3204 	rnd_detach_source(&sc->rnd_source);
3205 
3206 	/* Tell the firmware about the release */
3207 	WM_CORE_LOCK(sc);
3208 	wm_release_manageability(sc);
3209 	wm_release_hw_control(sc);
3210 	wm_enable_wakeup(sc);
3211 	WM_CORE_UNLOCK(sc);
3212 
3213 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3214 
3215 	ether_ifdetach(ifp);
3216 	if_detach(ifp);
3217 	if_percpuq_destroy(sc->sc_ipq);
3218 
3219 	/* Delete all remaining media. */
3220 	ifmedia_fini(&sc->sc_mii.mii_media);
3221 
3222 	/* Unload RX dmamaps and free mbufs */
3223 	for (i = 0; i < sc->sc_nqueues; i++) {
3224 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3225 		mutex_enter(rxq->rxq_lock);
3226 		wm_rxdrain(rxq);
3227 		mutex_exit(rxq->rxq_lock);
3228 	}
3229 	/* Must unlock here */
3230 
3231 	/* Disestablish the interrupt handler */
3232 	for (i = 0; i < sc->sc_nintrs; i++) {
3233 		if (sc->sc_ihs[i] != NULL) {
3234 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3235 			sc->sc_ihs[i] = NULL;
3236 		}
3237 	}
3238 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3239 
3240 	/* wm_stop() ensures the workqueue is stopped. */
3241 	workqueue_destroy(sc->sc_queue_wq);
3242 
3243 	for (i = 0; i < sc->sc_nqueues; i++)
3244 		softint_disestablish(sc->sc_queue[i].wmq_si);
3245 
3246 	wm_free_txrx_queues(sc);
3247 
3248 	/* Unmap the registers */
3249 	if (sc->sc_ss) {
3250 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3251 		sc->sc_ss = 0;
3252 	}
3253 	if (sc->sc_ios) {
3254 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3255 		sc->sc_ios = 0;
3256 	}
3257 	if (sc->sc_flashs) {
3258 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3259 		sc->sc_flashs = 0;
3260 	}
3261 
3262 	if (sc->sc_core_lock)
3263 		mutex_obj_free(sc->sc_core_lock);
3264 	if (sc->sc_ich_phymtx)
3265 		mutex_obj_free(sc->sc_ich_phymtx);
3266 	if (sc->sc_ich_nvmmtx)
3267 		mutex_obj_free(sc->sc_ich_nvmmtx);
3268 
3269 	return 0;
3270 }
3271 
3272 static bool
3273 wm_suspend(device_t self, const pmf_qual_t *qual)
3274 {
3275 	struct wm_softc *sc = device_private(self);
3276 
3277 	wm_release_manageability(sc);
3278 	wm_release_hw_control(sc);
3279 	wm_enable_wakeup(sc);
3280 
3281 	return true;
3282 }
3283 
3284 static bool
3285 wm_resume(device_t self, const pmf_qual_t *qual)
3286 {
3287 	struct wm_softc *sc = device_private(self);
3288 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3289 	pcireg_t reg;
3290 	char buf[256];
3291 
3292 	reg = CSR_READ(sc, WMREG_WUS);
3293 	if (reg != 0) {
3294 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3295 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3296 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3297 	}
3298 
3299 	if (sc->sc_type >= WM_T_PCH2)
3300 		wm_resume_workarounds_pchlan(sc);
3301 	if ((ifp->if_flags & IFF_UP) == 0) {
3302 		/* >= PCH_SPT hardware workaround before reset. */
3303 		if (sc->sc_type >= WM_T_PCH_SPT)
3304 			wm_flush_desc_rings(sc);
3305 
3306 		wm_reset(sc);
3307 		/* Non-AMT based hardware can now take control from firmware */
3308 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3309 			wm_get_hw_control(sc);
3310 		wm_init_manageability(sc);
3311 	} else {
3312 		/*
3313 		 * We called pmf_class_network_register(), so if_init() is
3314 		 * automatically called when IFF_UP is set. wm_reset(),
3315 		 * wm_get_hw_control() and wm_init_manageability() are called
3316 		 * via wm_init().
3317 		 */
3318 	}
3319 
3320 	return true;
3321 }
3322 
3323 /*
3324  * wm_watchdog:		[ifnet interface function]
3325  *
3326  *	Watchdog timer handler.
3327  */
3328 static void
3329 wm_watchdog(struct ifnet *ifp)
3330 {
3331 	int qid;
3332 	struct wm_softc *sc = ifp->if_softc;
3333 	uint16_t hang_queue = 0; /* One bit per queue; the 82576's 16 queues are the most. */
3334 
3335 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3336 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3337 
3338 		wm_watchdog_txq(ifp, txq, &hang_queue);
3339 	}
3340 
3341 	/* If any of the queues hung up, reset the interface. */
3342 	if (hang_queue != 0) {
3343 		(void)wm_init(ifp);
3344 
3345 		/*
3346 		 * There is still some upper layer processing which calls
3347 		 * ifp->if_start(), e.g. ALTQ or a single CPU system.
3348 		 */
3349 		/* Try to get more packets going. */
3350 		ifp->if_start(ifp);
3351 	}
3352 }
3353 
3354 
3355 static void
3356 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3357 {
3358 
3359 	mutex_enter(txq->txq_lock);
3360 	if (txq->txq_sending &&
3361 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3362 		wm_watchdog_txq_locked(ifp, txq, hang);
3363 
3364 	mutex_exit(txq->txq_lock);
3365 }
3366 
3367 static void
3368 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3369     uint16_t *hang)
3370 {
3371 	struct wm_softc *sc = ifp->if_softc;
3372 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3373 
3374 	KASSERT(mutex_owned(txq->txq_lock));
3375 
3376 	/*
3377 	 * Since we're using delayed interrupts, sweep up
3378 	 * before we report an error.
3379 	 */
3380 	wm_txeof(txq, UINT_MAX);
3381 
3382 	if (txq->txq_sending)
3383 		*hang |= __BIT(wmq->wmq_id);
3384 
3385 	if (txq->txq_free == WM_NTXDESC(txq)) {
3386 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3387 		    device_xname(sc->sc_dev));
3388 	} else {
3389 #ifdef WM_DEBUG
3390 		int i, j;
3391 		struct wm_txsoft *txs;
3392 #endif
3393 		log(LOG_ERR,
3394 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3395 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3396 		    txq->txq_next);
3397 		if_statinc(ifp, if_oerrors);
3398 #ifdef WM_DEBUG
3399 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3400 		    i = WM_NEXTTXS(txq, i)) {
3401 			txs = &txq->txq_soft[i];
3402 			printf("txs %d tx %d -> %d\n",
3403 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3404 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3405 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3406 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3407 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3408 					printf("\t %#08x%08x\n",
3409 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3410 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3411 				} else {
3412 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3413 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3414 					    txq->txq_descs[j].wtx_addr.wa_low);
3415 					printf("\t %#04x%02x%02x%08x\n",
3416 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3417 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3418 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3419 					    txq->txq_descs[j].wtx_cmdlen);
3420 				}
3421 				if (j == txs->txs_lastdesc)
3422 					break;
3423 			}
3424 		}
3425 #endif
3426 	}
3427 }
3428 
3429 /*
3430  * wm_tick:
3431  *
3432  *	One second timer, used to check link status, sweep up
3433  *	completed transmit jobs, etc.
3434  */
3435 static void
3436 wm_tick(void *arg)
3437 {
3438 	struct wm_softc *sc = arg;
3439 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3440 #ifndef WM_MPSAFE
3441 	int s = splnet();
3442 #endif
3443 
3444 	WM_CORE_LOCK(sc);
3445 
3446 	if (sc->sc_core_stopping) {
3447 		WM_CORE_UNLOCK(sc);
3448 #ifndef WM_MPSAFE
3449 		splx(s);
3450 #endif
3451 		return;
3452 	}
3453 
3454 	if (sc->sc_type >= WM_T_82542_2_1) {
3455 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3456 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3457 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3458 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3459 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3460 	}
3461 
3462 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3463 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3464 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3465 	    + CSR_READ(sc, WMREG_CRCERRS)
3466 	    + CSR_READ(sc, WMREG_ALGNERRC)
3467 	    + CSR_READ(sc, WMREG_SYMERRC)
3468 	    + CSR_READ(sc, WMREG_RXERRC)
3469 	    + CSR_READ(sc, WMREG_SEC)
3470 	    + CSR_READ(sc, WMREG_CEXTERR)
3471 	    + CSR_READ(sc, WMREG_RLEC));
3472 	/*
3473 	 * WMREG_RNBC is incremented when there are no available buffers in
3474 	 * host memory. It does not count dropped packets, because the
3475 	 * ethernet controller can still receive packets in that case as
3476 	 * long as there is space in the PHY's FIFO.
3477 	 *
3478 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead of
3479 	 * if_iqdrops.
3480 	 */
3481 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3482 	IF_STAT_PUTREF(ifp);
3483 
3484 	if (sc->sc_flags & WM_F_HAS_MII)
3485 		mii_tick(&sc->sc_mii);
3486 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3487 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3488 		wm_serdes_tick(sc);
3489 	else
3490 		wm_tbi_tick(sc);
3491 
3492 	WM_CORE_UNLOCK(sc);
3493 
3494 	wm_watchdog(ifp);
3495 
3496 	callout_schedule(&sc->sc_tick_ch, hz);
3497 }
3498 
3499 static int
3500 wm_ifflags_cb(struct ethercom *ec)
3501 {
3502 	struct ifnet *ifp = &ec->ec_if;
3503 	struct wm_softc *sc = ifp->if_softc;
3504 	u_short iffchange;
3505 	int ecchange;
3506 	bool needreset = false;
3507 	int rc = 0;
3508 
3509 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3510 		device_xname(sc->sc_dev), __func__));
3511 
3512 	WM_CORE_LOCK(sc);
3513 
3514 	/*
3515 	 * Check for changes to if_flags.
3516 	 * The main purpose is to prevent link down when opening bpf.
3517 	 */
3518 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3519 	sc->sc_if_flags = ifp->if_flags;
3520 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3521 		needreset = true;
3522 		goto ec;
3523 	}
3524 
3525 	/* iff related updates */
3526 	if ((iffchange & IFF_PROMISC) != 0)
3527 		wm_set_filter(sc);
3528 
3529 	wm_set_vlan(sc);
3530 
3531 ec:
3532 	/* Check for ec_capenable. */
3533 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3534 	sc->sc_ec_capenable = ec->ec_capenable;
3535 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3536 		needreset = true;
3537 		goto out;
3538 	}
3539 
3540 	/* ec related updates */
3541 	wm_set_eee(sc);
3542 
3543 out:
3544 	if (needreset)
3545 		rc = ENETRESET;
3546 	WM_CORE_UNLOCK(sc);
3547 
3548 	return rc;
3549 }
3550 
3551 static bool
3552 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3553 {
3554 
3555 	switch (sc->sc_phytype) {
3556 	case WMPHY_82577: /* ihphy */
3557 	case WMPHY_82578: /* atphy */
3558 	case WMPHY_82579: /* ihphy */
3559 	case WMPHY_I217: /* ihphy */
3560 	case WMPHY_82580: /* ihphy */
3561 	case WMPHY_I350: /* ihphy */
3562 		return true;
3563 	default:
3564 		return false;
3565 	}
3566 }
3567 
3568 static void
3569 wm_set_linkdown_discard(struct wm_softc *sc)
3570 {
3571 
3572 	for (int i = 0; i < sc->sc_nqueues; i++) {
3573 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3574 
3575 		mutex_enter(txq->txq_lock);
3576 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3577 		mutex_exit(txq->txq_lock);
3578 	}
3579 }
3580 
3581 static void
3582 wm_clear_linkdown_discard(struct wm_softc *sc)
3583 {
3584 
3585 	for (int i = 0; i < sc->sc_nqueues; i++) {
3586 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3587 
3588 		mutex_enter(txq->txq_lock);
3589 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3590 		mutex_exit(txq->txq_lock);
3591 	}
3592 }
3593 
3594 /*
3595  * wm_ioctl:		[ifnet interface function]
3596  *
3597  *	Handle control requests from the operator.
3598  */
3599 static int
3600 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3601 {
3602 	struct wm_softc *sc = ifp->if_softc;
3603 	struct ifreq *ifr = (struct ifreq *)data;
3604 	struct ifaddr *ifa = (struct ifaddr *)data;
3605 	struct sockaddr_dl *sdl;
3606 	int s, error;
3607 
3608 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3609 		device_xname(sc->sc_dev), __func__));
3610 
3611 #ifndef WM_MPSAFE
3612 	s = splnet();
3613 #endif
3614 	switch (cmd) {
3615 	case SIOCSIFMEDIA:
3616 		WM_CORE_LOCK(sc);
3617 		/* Flow control requires full-duplex mode. */
3618 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3619 		    (ifr->ifr_media & IFM_FDX) == 0)
3620 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3621 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3622 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3623 				/* We can do both TXPAUSE and RXPAUSE. */
3624 				ifr->ifr_media |=
3625 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3626 			}
3627 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3628 		}
3629 		WM_CORE_UNLOCK(sc);
3630 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3631 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
3632 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE) {
3633 				DPRINTF(sc, WM_DEBUG_LINK,
3634 				    ("%s: %s: Set linkdown discard flag\n",
3635 					device_xname(sc->sc_dev), __func__));
3636 				wm_set_linkdown_discard(sc);
3637 			}
3638 		}
3639 		break;
3640 	case SIOCINITIFADDR:
3641 		WM_CORE_LOCK(sc);
3642 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3643 			sdl = satosdl(ifp->if_dl->ifa_addr);
3644 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3645 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3646 			/* Unicast address is the first multicast entry */
3647 			wm_set_filter(sc);
3648 			error = 0;
3649 			WM_CORE_UNLOCK(sc);
3650 			break;
3651 		}
3652 		WM_CORE_UNLOCK(sc);
3653 		/*FALLTHROUGH*/
3654 	default:
3655 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
3656 			if (((ifp->if_flags & IFF_UP) != 0) &&
3657 			    ((ifr->ifr_flags & IFF_UP) == 0)) {
3658 				DPRINTF(sc, WM_DEBUG_LINK,
3659 				    ("%s: %s: Set linkdown discard flag\n",
3660 					device_xname(sc->sc_dev), __func__));
3661 				wm_set_linkdown_discard(sc);
3662 			}
3663 		}
3664 #ifdef WM_MPSAFE
3665 		s = splnet();
3666 #endif
3667 		/* It may call wm_start, so unlock here */
3668 		error = ether_ioctl(ifp, cmd, data);
3669 #ifdef WM_MPSAFE
3670 		splx(s);
3671 #endif
3672 		if (error != ENETRESET)
3673 			break;
3674 
3675 		error = 0;
3676 
3677 		if (cmd == SIOCSIFCAP)
3678 			error = (*ifp->if_init)(ifp);
3679 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3680 			;
3681 		else if (ifp->if_flags & IFF_RUNNING) {
3682 			/*
3683 			 * Multicast list has changed; set the hardware filter
3684 			 * accordingly.
3685 			 */
3686 			WM_CORE_LOCK(sc);
3687 			wm_set_filter(sc);
3688 			WM_CORE_UNLOCK(sc);
3689 		}
3690 		break;
3691 	}
3692 
3693 #ifndef WM_MPSAFE
3694 	splx(s);
3695 #endif
3696 	return error;
3697 }
3698 
3699 /* MAC address related */
3700 
3701 /*
3702  * Get the offset of the MAC address and return it.
3703  * If an error occurs, use offset 0.
3704  */
3705 static uint16_t
3706 wm_check_alt_mac_addr(struct wm_softc *sc)
3707 {
3708 	uint16_t myea[ETHER_ADDR_LEN / 2];
3709 	uint16_t offset = NVM_OFF_MACADDR;
3710 
3711 	/* Try to read alternative MAC address pointer */
3712 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3713 		return 0;
3714 
3715 	/* Check whether the pointer is valid. */
3716 	if ((offset == 0x0000) || (offset == 0xffff))
3717 		return 0;
3718 
3719 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3720 	/*
3721 	 * Check whether the alternative MAC address is valid or not.
3722 	 * Some cards have a non-0xffff pointer but don't actually use an
3723 	 * alternative MAC address.
3724 	 *
3725 	 * A valid unicast address must have the multicast (group) bit clear.
3726 	 */
3727 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3728 		if (((myea[0] & 0xff) & 0x01) == 0)
3729 			return offset; /* Found */
3730 
3731 	/* Not found */
3732 	return 0;
3733 }
3734 
3735 static int
3736 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3737 {
3738 	uint16_t myea[ETHER_ADDR_LEN / 2];
3739 	uint16_t offset = NVM_OFF_MACADDR;
3740 	int do_invert = 0;
3741 
3742 	switch (sc->sc_type) {
3743 	case WM_T_82580:
3744 	case WM_T_I350:
3745 	case WM_T_I354:
3746 		/* EEPROM Top Level Partitioning */
3747 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3748 		break;
3749 	case WM_T_82571:
3750 	case WM_T_82575:
3751 	case WM_T_82576:
3752 	case WM_T_80003:
3753 	case WM_T_I210:
3754 	case WM_T_I211:
3755 		offset = wm_check_alt_mac_addr(sc);
3756 		if (offset == 0)
3757 			if ((sc->sc_funcid & 0x01) == 1)
3758 				do_invert = 1;
3759 		break;
3760 	default:
3761 		if ((sc->sc_funcid & 0x01) == 1)
3762 			do_invert = 1;
3763 		break;
3764 	}
3765 
3766 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3767 		goto bad;
3768 
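	/*
	 * The NVM stores the address as three 16-bit little-endian words;
	 * e.g. a word value of 0x1100 yields enaddr bytes 0x00 then 0x11.
	 */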
3769 	enaddr[0] = myea[0] & 0xff;
3770 	enaddr[1] = myea[0] >> 8;
3771 	enaddr[2] = myea[1] & 0xff;
3772 	enaddr[3] = myea[1] >> 8;
3773 	enaddr[4] = myea[2] & 0xff;
3774 	enaddr[5] = myea[2] >> 8;
3775 
3776 	/*
3777 	 * Toggle the LSB of the MAC address on the second port
3778 	 * of some dual port cards.
3779 	 */
3780 	if (do_invert != 0)
3781 		enaddr[5] ^= 1;
3782 
3783 	return 0;
3784 
3785  bad:
3786 	return -1;
3787 }
3788 
3789 /*
3790  * wm_set_ral:
3791  *
3792  *	Set an entry in the receive address list.
3793  */
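/*
 * For example, for enaddr 00:11:22:33:44:55 the packing below yields
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
 */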
3794 static void
3795 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3796 {
3797 	uint32_t ral_lo, ral_hi, addrl, addrh;
3798 	uint32_t wlock_mac;
3799 	int rv;
3800 
3801 	if (enaddr != NULL) {
3802 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3803 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3804 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3805 		ral_hi |= RAL_AV;
3806 	} else {
3807 		ral_lo = 0;
3808 		ral_hi = 0;
3809 	}
3810 
3811 	switch (sc->sc_type) {
3812 	case WM_T_82542_2_0:
3813 	case WM_T_82542_2_1:
3814 	case WM_T_82543:
3815 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3816 		CSR_WRITE_FLUSH(sc);
3817 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3818 		CSR_WRITE_FLUSH(sc);
3819 		break;
3820 	case WM_T_PCH2:
3821 	case WM_T_PCH_LPT:
3822 	case WM_T_PCH_SPT:
3823 	case WM_T_PCH_CNP:
3824 		if (idx == 0) {
3825 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3826 			CSR_WRITE_FLUSH(sc);
3827 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3828 			CSR_WRITE_FLUSH(sc);
3829 			return;
3830 		}
3831 		if (sc->sc_type != WM_T_PCH2) {
3832 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3833 			    FWSM_WLOCK_MAC);
3834 			addrl = WMREG_SHRAL(idx - 1);
3835 			addrh = WMREG_SHRAH(idx - 1);
3836 		} else {
3837 			wlock_mac = 0;
3838 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3839 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3840 		}
3841 
3842 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3843 			rv = wm_get_swflag_ich8lan(sc);
3844 			if (rv != 0)
3845 				return;
3846 			CSR_WRITE(sc, addrl, ral_lo);
3847 			CSR_WRITE_FLUSH(sc);
3848 			CSR_WRITE(sc, addrh, ral_hi);
3849 			CSR_WRITE_FLUSH(sc);
3850 			wm_put_swflag_ich8lan(sc);
3851 		}
3852 
3853 		break;
3854 	default:
3855 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3856 		CSR_WRITE_FLUSH(sc);
3857 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3858 		CSR_WRITE_FLUSH(sc);
3859 		break;
3860 	}
3861 }
3862 
3863 /*
3864  * wm_mchash:
3865  *
3866  *	Compute the hash of the multicast address for the multicast
3867  *	filter (4096 bits, or 1024 bits on ICH/PCH variants).
3868  */
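/*
 * Worked example with the default filter type (sc_mchash_type == 0):
 * for 01:00:5e:00:00:01, hash = (0x00 >> 4) | (0x01 << 4) = 0x010,
 * which wm_set_filter() maps to bit 16 of MTA word 0.
 */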
3869 static uint32_t
3870 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3871 {
3872 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3873 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3874 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3875 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3876 	uint32_t hash;
3877 
3878 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3879 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3880 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3881 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3882 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3883 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3884 		return (hash & 0x3ff);
3885 	}
3886 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3887 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3888 
3889 	return (hash & 0xfff);
3890 }
3891 
3892 /*
3893  * wm_rar_count:
3894  *	Return the number of entries in the receive address list.
3895  */
3896 static int
3897 wm_rar_count(struct wm_softc *sc)
3898 {
3899 	int size;
3900 
3901 	switch (sc->sc_type) {
3902 	case WM_T_ICH8:
3903 		size = WM_RAL_TABSIZE_ICH8 - 1;
3904 		break;
3905 	case WM_T_ICH9:
3906 	case WM_T_ICH10:
3907 	case WM_T_PCH:
3908 		size = WM_RAL_TABSIZE_ICH8;
3909 		break;
3910 	case WM_T_PCH2:
3911 		size = WM_RAL_TABSIZE_PCH2;
3912 		break;
3913 	case WM_T_PCH_LPT:
3914 	case WM_T_PCH_SPT:
3915 	case WM_T_PCH_CNP:
3916 		size = WM_RAL_TABSIZE_PCH_LPT;
3917 		break;
3918 	case WM_T_82575:
3919 	case WM_T_I210:
3920 	case WM_T_I211:
3921 		size = WM_RAL_TABSIZE_82575;
3922 		break;
3923 	case WM_T_82576:
3924 	case WM_T_82580:
3925 		size = WM_RAL_TABSIZE_82576;
3926 		break;
3927 	case WM_T_I350:
3928 	case WM_T_I354:
3929 		size = WM_RAL_TABSIZE_I350;
3930 		break;
3931 	default:
3932 		size = WM_RAL_TABSIZE;
3933 	}
3934 
3935 	return size;
3936 }
3937 
3938 /*
3939  * wm_set_filter:
3940  *
3941  *	Set up the receive filter.
3942  */
3943 static void
3944 wm_set_filter(struct wm_softc *sc)
3945 {
3946 	struct ethercom *ec = &sc->sc_ethercom;
3947 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3948 	struct ether_multi *enm;
3949 	struct ether_multistep step;
3950 	bus_addr_t mta_reg;
3951 	uint32_t hash, reg, bit;
3952 	int i, size, ralmax, rv;
3953 
3954 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3955 		device_xname(sc->sc_dev), __func__));
3956 
3957 	if (sc->sc_type >= WM_T_82544)
3958 		mta_reg = WMREG_CORDOVA_MTA;
3959 	else
3960 		mta_reg = WMREG_MTA;
3961 
3962 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3963 
3964 	if (ifp->if_flags & IFF_BROADCAST)
3965 		sc->sc_rctl |= RCTL_BAM;
3966 	if (ifp->if_flags & IFF_PROMISC) {
3967 		sc->sc_rctl |= RCTL_UPE;
3968 		ETHER_LOCK(ec);
3969 		ec->ec_flags |= ETHER_F_ALLMULTI;
3970 		ETHER_UNLOCK(ec);
3971 		goto allmulti;
3972 	}
3973 
3974 	/*
3975 	 * Set the station address in the first RAL slot, and
3976 	 * clear the remaining slots.
3977 	 */
3978 	size = wm_rar_count(sc);
3979 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3980 
3981 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3982 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3983 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3984 		switch (i) {
3985 		case 0:
3986 			/* We can use all entries */
3987 			ralmax = size;
3988 			break;
3989 		case 1:
3990 			/* Only RAR[0] */
3991 			ralmax = 1;
3992 			break;
3993 		default:
3994 			/* Available SHRA + RAR[0] */
3995 			ralmax = i + 1;
3996 		}
3997 	} else
3998 		ralmax = size;
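	/*
	 * Clear only the entries below ralmax; the remaining SHRA registers
	 * are locked by firmware (FWSM.WLOCK_MAC) and are left untouched.
	 */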
3999 	for (i = 1; i < size; i++) {
4000 		if (i < ralmax)
4001 			wm_set_ral(sc, NULL, i);
4002 	}
4003 
4004 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4005 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4006 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4007 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
4008 		size = WM_ICH8_MC_TABSIZE;
4009 	else
4010 		size = WM_MC_TABSIZE;
4011 	/* Clear out the multicast table. */
4012 	for (i = 0; i < size; i++) {
4013 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
4014 		CSR_WRITE_FLUSH(sc);
4015 	}
4016 
4017 	ETHER_LOCK(ec);
4018 	ETHER_FIRST_MULTI(step, ec, enm);
4019 	while (enm != NULL) {
4020 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4021 			ec->ec_flags |= ETHER_F_ALLMULTI;
4022 			ETHER_UNLOCK(ec);
4023 			/*
4024 			 * We must listen to a range of multicast addresses.
4025 			 * For now, just accept all multicasts, rather than
4026 			 * trying to set only those filter bits needed to match
4027 			 * the range.  (At this time, the only use of address
4028 			 * ranges is for IP multicast routing, for which the
4029 			 * range is big enough to require all bits set.)
4030 			 */
4031 			goto allmulti;
4032 		}
4033 
4034 		hash = wm_mchash(sc, enm->enm_addrlo);
4035 
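		/*
		 * The hash indexes a bit array spread across 32-bit MTA
		 * registers: the upper bits select the register and the
		 * low five bits select the bit within it.
		 */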
4036 		reg = (hash >> 5);
4037 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4038 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4039 		    || (sc->sc_type == WM_T_PCH2)
4040 		    || (sc->sc_type == WM_T_PCH_LPT)
4041 		    || (sc->sc_type == WM_T_PCH_SPT)
4042 		    || (sc->sc_type == WM_T_PCH_CNP))
4043 			reg &= 0x1f;
4044 		else
4045 			reg &= 0x7f;
4046 		bit = hash & 0x1f;
4047 
4048 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4049 		hash |= 1U << bit;
4050 
4051 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4052 			/*
4053 			 * 82544 Errata 9: Certain registers cannot be written
4054 			 * with particular alignments in PCI-X bus operation
4055 			 * (FCAH, MTA and VFTA).
4056 			 */
4057 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4058 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4059 			CSR_WRITE_FLUSH(sc);
4060 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4061 			CSR_WRITE_FLUSH(sc);
4062 		} else {
4063 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4064 			CSR_WRITE_FLUSH(sc);
4065 		}
4066 
4067 		ETHER_NEXT_MULTI(step, enm);
4068 	}
4069 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
4070 	ETHER_UNLOCK(ec);
4071 
4072 	goto setit;
4073 
4074  allmulti:
4075 	sc->sc_rctl |= RCTL_MPE;
4076 
4077  setit:
4078 	if (sc->sc_type >= WM_T_PCH2) {
4079 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4080 		    && (ifp->if_mtu > ETHERMTU))
4081 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4082 		else
4083 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4084 		if (rv != 0)
4085 			device_printf(sc->sc_dev,
4086 			    "Failed to do workaround for jumbo frame.\n");
4087 	}
4088 
4089 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4090 }
4091 
4092 /* Reset and init related */
4093 
4094 static void
4095 wm_set_vlan(struct wm_softc *sc)
4096 {
4097 
4098 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4099 		device_xname(sc->sc_dev), __func__));
4100 
4101 	/* Deal with VLAN enables. */
4102 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4103 		sc->sc_ctrl |= CTRL_VME;
4104 	else
4105 		sc->sc_ctrl &= ~CTRL_VME;
4106 
4107 	/* Write the control registers. */
4108 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4109 }
4110 
4111 static void
4112 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4113 {
4114 	uint32_t gcr;
4115 	pcireg_t ctrl2;
4116 
4117 	gcr = CSR_READ(sc, WMREG_GCR);
4118 
4119 	/* Only take action if the timeout value is defaulted to 0 */
4120 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4121 		goto out;
4122 
4123 	if ((gcr & GCR_CAP_VER2) == 0) {
4124 		gcr |= GCR_CMPL_TMOUT_10MS;
4125 		goto out;
4126 	}
4127 
4128 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4129 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
4130 	ctrl2 |= WM_PCIE_DCSR2_16MS;
4131 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4132 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4133 
4134 out:
4135 	/* Disable completion timeout resend */
4136 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
4137 
4138 	CSR_WRITE(sc, WMREG_GCR, gcr);
4139 }
4140 
4141 void
4142 wm_get_auto_rd_done(struct wm_softc *sc)
4143 {
4144 	int i;
4145 
4146 	/* Wait for eeprom to reload */
4147 	switch (sc->sc_type) {
4148 	case WM_T_82571:
4149 	case WM_T_82572:
4150 	case WM_T_82573:
4151 	case WM_T_82574:
4152 	case WM_T_82583:
4153 	case WM_T_82575:
4154 	case WM_T_82576:
4155 	case WM_T_82580:
4156 	case WM_T_I350:
4157 	case WM_T_I354:
4158 	case WM_T_I210:
4159 	case WM_T_I211:
4160 	case WM_T_80003:
4161 	case WM_T_ICH8:
4162 	case WM_T_ICH9:
4163 		for (i = 0; i < 10; i++) {
4164 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4165 				break;
4166 			delay(1000);
4167 		}
4168 		if (i == 10) {
4169 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4170 			    "complete\n", device_xname(sc->sc_dev));
4171 		}
4172 		break;
4173 	default:
4174 		break;
4175 	}
4176 }
4177 
4178 void
4179 wm_lan_init_done(struct wm_softc *sc)
4180 {
4181 	uint32_t reg = 0;
4182 	int i;
4183 
4184 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4185 		device_xname(sc->sc_dev), __func__));
4186 
4187 	/* Wait for eeprom to reload */
4188 	switch (sc->sc_type) {
4189 	case WM_T_ICH10:
4190 	case WM_T_PCH:
4191 	case WM_T_PCH2:
4192 	case WM_T_PCH_LPT:
4193 	case WM_T_PCH_SPT:
4194 	case WM_T_PCH_CNP:
4195 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4196 			reg = CSR_READ(sc, WMREG_STATUS);
4197 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4198 				break;
4199 			delay(100);
4200 		}
4201 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4202 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4203 			    "complete\n", device_xname(sc->sc_dev), __func__);
4204 		}
4205 		break;
4206 	default:
4207 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4208 		    __func__);
4209 		break;
4210 	}
4211 
4212 	reg &= ~STATUS_LAN_INIT_DONE;
4213 	CSR_WRITE(sc, WMREG_STATUS, reg);
4214 }
4215 
4216 void
4217 wm_get_cfg_done(struct wm_softc *sc)
4218 {
4219 	int mask;
4220 	uint32_t reg;
4221 	int i;
4222 
4223 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4224 		device_xname(sc->sc_dev), __func__));
4225 
4226 	/* Wait for eeprom to reload */
4227 	switch (sc->sc_type) {
4228 	case WM_T_82542_2_0:
4229 	case WM_T_82542_2_1:
4230 		/* null */
4231 		break;
4232 	case WM_T_82543:
4233 	case WM_T_82544:
4234 	case WM_T_82540:
4235 	case WM_T_82545:
4236 	case WM_T_82545_3:
4237 	case WM_T_82546:
4238 	case WM_T_82546_3:
4239 	case WM_T_82541:
4240 	case WM_T_82541_2:
4241 	case WM_T_82547:
4242 	case WM_T_82547_2:
4243 	case WM_T_82573:
4244 	case WM_T_82574:
4245 	case WM_T_82583:
4246 		/* generic */
4247 		delay(10*1000);
4248 		break;
4249 	case WM_T_80003:
4250 	case WM_T_82571:
4251 	case WM_T_82572:
4252 	case WM_T_82575:
4253 	case WM_T_82576:
4254 	case WM_T_82580:
4255 	case WM_T_I350:
4256 	case WM_T_I354:
4257 	case WM_T_I210:
4258 	case WM_T_I211:
4259 		if (sc->sc_type == WM_T_82571) {
4260 			/* Only 82571 shares port 0 */
4261 			mask = EEMNGCTL_CFGDONE_0;
4262 		} else
4263 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4264 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4265 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4266 				break;
4267 			delay(1000);
4268 		}
4269 		if (i >= WM_PHY_CFG_TIMEOUT)
4270 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4271 				device_xname(sc->sc_dev), __func__));
4272 		break;
4273 	case WM_T_ICH8:
4274 	case WM_T_ICH9:
4275 	case WM_T_ICH10:
4276 	case WM_T_PCH:
4277 	case WM_T_PCH2:
4278 	case WM_T_PCH_LPT:
4279 	case WM_T_PCH_SPT:
4280 	case WM_T_PCH_CNP:
4281 		delay(10*1000);
4282 		if (sc->sc_type >= WM_T_ICH10)
4283 			wm_lan_init_done(sc);
4284 		else
4285 			wm_get_auto_rd_done(sc);
4286 
4287 		/* Clear PHY Reset Asserted bit */
4288 		reg = CSR_READ(sc, WMREG_STATUS);
4289 		if ((reg & STATUS_PHYRA) != 0)
4290 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4291 		break;
4292 	default:
4293 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4294 		    __func__);
4295 		break;
4296 	}
4297 }
4298 
4299 int
4300 wm_phy_post_reset(struct wm_softc *sc)
4301 {
4302 	device_t dev = sc->sc_dev;
4303 	uint16_t reg;
4304 	int rv = 0;
4305 
4306 	/* This function is only for ICH8 and newer. */
4307 	if (sc->sc_type < WM_T_ICH8)
4308 		return 0;
4309 
4310 	if (wm_phy_resetisblocked(sc)) {
4311 		/* XXX */
4312 		device_printf(dev, "PHY is blocked\n");
4313 		return -1;
4314 	}
4315 
4316 	/* Allow time for h/w to get to quiescent state after reset */
4317 	delay(10*1000);
4318 
4319 	/* Perform any necessary post-reset workarounds */
4320 	if (sc->sc_type == WM_T_PCH)
4321 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4322 	else if (sc->sc_type == WM_T_PCH2)
4323 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4324 	if (rv != 0)
4325 		return rv;
4326 
4327 	/* Clear the host wakeup bit after lcd reset */
4328 	if (sc->sc_type >= WM_T_PCH) {
4329 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4330 		reg &= ~BM_WUC_HOST_WU_BIT;
4331 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4332 	}
4333 
4334 	/* Configure the LCD with the extended configuration region in NVM */
4335 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4336 		return rv;
4337 
4338 	/* Configure the LCD with the OEM bits in NVM */
4339 	rv = wm_oem_bits_config_ich8lan(sc, true);
4340 
4341 	if (sc->sc_type == WM_T_PCH2) {
4342 		/* Ungate automatic PHY configuration on non-managed 82579 */
4343 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4344 			delay(10 * 1000);
4345 			wm_gate_hw_phy_config_ich8lan(sc, false);
4346 		}
4347 		/* Set EEE LPI Update Timer to 200usec */
4348 		rv = sc->phy.acquire(sc);
4349 		if (rv)
4350 			return rv;
4351 		rv = wm_write_emi_reg_locked(dev,
4352 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4353 		sc->phy.release(sc);
4354 	}
4355 
4356 	return rv;
4357 }
4358 
4359 /* Only for PCH and newer */
4360 static int
4361 wm_write_smbus_addr(struct wm_softc *sc)
4362 {
4363 	uint32_t strap, freq;
4364 	uint16_t phy_data;
4365 	int rv;
4366 
4367 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4368 		device_xname(sc->sc_dev), __func__));
4369 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4370 
4371 	strap = CSR_READ(sc, WMREG_STRAP);
4372 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4373 
4374 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4375 	if (rv != 0)
4376 		return -1;
4377 
4378 	phy_data &= ~HV_SMB_ADDR_ADDR;
4379 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4380 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4381 
4382 	if (sc->sc_phytype == WMPHY_I217) {
4383 		/* Restore SMBus frequency */
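		/*
		 * A strap value of 0 means the frequency is unsupported;
		 * otherwise the bits of (freq - 1) select the low/high
		 * frequency settings.
		 */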
4384 		if (freq--) {
4385 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4386 			    | HV_SMB_ADDR_FREQ_HIGH);
4387 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4388 			    HV_SMB_ADDR_FREQ_LOW);
4389 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4390 			    HV_SMB_ADDR_FREQ_HIGH);
4391 		} else
4392 			DPRINTF(sc, WM_DEBUG_INIT,
4393 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4394 				device_xname(sc->sc_dev), __func__));
4395 	}
4396 
4397 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4398 	    phy_data);
4399 }
4400 
4401 static int
4402 wm_init_lcd_from_nvm(struct wm_softc *sc)
4403 {
4404 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4405 	uint16_t phy_page = 0;
4406 	int rv = 0;
4407 
4408 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4409 		device_xname(sc->sc_dev), __func__));
4410 
4411 	switch (sc->sc_type) {
4412 	case WM_T_ICH8:
4413 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4414 		    || (sc->sc_phytype != WMPHY_IGP_3))
4415 			return 0;
4416 
4417 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4418 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4419 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4420 			break;
4421 		}
4422 		/* FALLTHROUGH */
4423 	case WM_T_PCH:
4424 	case WM_T_PCH2:
4425 	case WM_T_PCH_LPT:
4426 	case WM_T_PCH_SPT:
4427 	case WM_T_PCH_CNP:
4428 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4429 		break;
4430 	default:
4431 		return 0;
4432 	}
4433 
4434 	if ((rv = sc->phy.acquire(sc)) != 0)
4435 		return rv;
4436 
4437 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4438 	if ((reg & sw_cfg_mask) == 0)
4439 		goto release;
4440 
4441 	/*
4442 	 * Make sure HW does not configure LCD from PHY extended configuration
4443 	 * before SW configuration
4444 	 */
4445 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4446 	if ((sc->sc_type < WM_T_PCH2)
4447 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4448 		goto release;
4449 
4450 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4451 		device_xname(sc->sc_dev), __func__));
4452 	/* word_addr is in DWORD */
4453 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4454 
4455 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4456 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4457 	if (cnf_size == 0)
4458 		goto release;
4459 
4460 	if (((sc->sc_type == WM_T_PCH)
4461 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4462 	    || (sc->sc_type > WM_T_PCH)) {
4463 		/*
4464 		 * HW configures the SMBus address and LEDs when the OEM and
4465 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4466 		 * are cleared, SW will configure them instead.
4467 		 */
4468 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4469 			device_xname(sc->sc_dev), __func__));
4470 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4471 			goto release;
4472 
4473 		reg = CSR_READ(sc, WMREG_LEDCTL);
4474 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4475 		    (uint16_t)reg);
4476 		if (rv != 0)
4477 			goto release;
4478 	}
4479 
4480 	/* Configure LCD from extended configuration region. */
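	/*
	 * Each entry in the region is a pair of NVM words: the register
	 * data followed by the register address.
	 */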
4481 	for (i = 0; i < cnf_size; i++) {
4482 		uint16_t reg_data, reg_addr;
4483 
4484 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4485 			goto release;
4486 
4487 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
4488 			goto release;
4489 
4490 		if (reg_addr == IGPHY_PAGE_SELECT)
4491 			phy_page = reg_data;
4492 
4493 		reg_addr &= IGPHY_MAXREGADDR;
4494 		reg_addr |= phy_page;
4495 
4496 		KASSERT(sc->phy.writereg_locked != NULL);
4497 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4498 		    reg_data);
4499 	}
4500 
4501 release:
4502 	sc->phy.release(sc);
4503 	return rv;
4504 }
4505 
4506 /*
4507  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4508  *  @sc:       pointer to the HW structure
4509  *  @d0_state: boolean if entering d0 or d3 device state
4510  *
4511  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4512  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
4513  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
4514  */
4515 int
4516 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4517 {
4518 	uint32_t mac_reg;
4519 	uint16_t oem_reg;
4520 	int rv;
4521 
4522 	if (sc->sc_type < WM_T_PCH)
4523 		return 0;
4524 
4525 	rv = sc->phy.acquire(sc);
4526 	if (rv != 0)
4527 		return rv;
4528 
4529 	if (sc->sc_type == WM_T_PCH) {
4530 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4531 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4532 			goto release;
4533 	}
4534 
4535 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4536 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4537 		goto release;
4538 
4539 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4540 
4541 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4542 	if (rv != 0)
4543 		goto release;
4544 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4545 
4546 	if (d0_state) {
4547 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4548 			oem_reg |= HV_OEM_BITS_A1KDIS;
4549 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4550 			oem_reg |= HV_OEM_BITS_LPLU;
4551 	} else {
4552 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4553 		    != 0)
4554 			oem_reg |= HV_OEM_BITS_A1KDIS;
4555 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4556 		    != 0)
4557 			oem_reg |= HV_OEM_BITS_LPLU;
4558 	}
4559 
4560 	/* Set Restart auto-neg to activate the bits */
4561 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4562 	    && (wm_phy_resetisblocked(sc) == false))
4563 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4564 
4565 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4566 
4567 release:
4568 	sc->phy.release(sc);
4569 
4570 	return rv;
4571 }
4572 
4573 /* Init hardware bits */
4574 void
4575 wm_initialize_hardware_bits(struct wm_softc *sc)
4576 {
4577 	uint32_t tarc0, tarc1, reg;
4578 
4579 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4580 		device_xname(sc->sc_dev), __func__));
4581 
4582 	/* For 82571 variant, 80003 and ICHs */
4583 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4584 	    || (sc->sc_type >= WM_T_80003)) {
4585 
4586 		/* Transmit Descriptor Control 0 */
4587 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4588 		reg |= TXDCTL_COUNT_DESC;
4589 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4590 
4591 		/* Transmit Descriptor Control 1 */
4592 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4593 		reg |= TXDCTL_COUNT_DESC;
4594 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4595 
4596 		/* TARC0 */
4597 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4598 		switch (sc->sc_type) {
4599 		case WM_T_82571:
4600 		case WM_T_82572:
4601 		case WM_T_82573:
4602 		case WM_T_82574:
4603 		case WM_T_82583:
4604 		case WM_T_80003:
4605 			/* Clear bits 30..27 */
4606 			tarc0 &= ~__BITS(30, 27);
4607 			break;
4608 		default:
4609 			break;
4610 		}
4611 
4612 		switch (sc->sc_type) {
4613 		case WM_T_82571:
4614 		case WM_T_82572:
4615 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4616 
4617 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4618 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4619 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4620 			/* 8257[12] Errata No.7 */
4621 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4622 
4623 			/* TARC1 bit 28 */
4624 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4625 				tarc1 &= ~__BIT(28);
4626 			else
4627 				tarc1 |= __BIT(28);
4628 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4629 
4630 			/*
4631 			 * 8257[12] Errata No.13
4632 			 * Disable Dynamic Clock Gating.
4633 			 */
4634 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4635 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4636 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4637 			break;
4638 		case WM_T_82573:
4639 		case WM_T_82574:
4640 		case WM_T_82583:
4641 			if ((sc->sc_type == WM_T_82574)
4642 			    || (sc->sc_type == WM_T_82583))
4643 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4644 
4645 			/* Extended Device Control */
4646 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4647 			reg &= ~__BIT(23);	/* Clear bit 23 */
4648 			reg |= __BIT(22);	/* Set bit 22 */
4649 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4650 
4651 			/* Device Control */
4652 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4653 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4654 
4655 			/* PCIe Control Register */
4656 			/*
4657 			 * 82573 Errata (unknown).
4658 			 *
4659 			 * 82574 Errata 25 and 82583 Errata 12
4660 			 * "Dropped Rx Packets":
4661 			 *   NVM image versions 2.1.4 and newer do not have this bug.
4662 			 */
4663 			reg = CSR_READ(sc, WMREG_GCR);
4664 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4665 			CSR_WRITE(sc, WMREG_GCR, reg);
4666 
4667 			if ((sc->sc_type == WM_T_82574)
4668 			    || (sc->sc_type == WM_T_82583)) {
4669 				/*
4670 				 * Document says this bit must be set for
4671 				 * proper operation.
4672 				 */
4673 				reg = CSR_READ(sc, WMREG_GCR);
4674 				reg |= __BIT(22);
4675 				CSR_WRITE(sc, WMREG_GCR, reg);
4676 
4677 				/*
4678 				 * Apply a workaround for the hardware erratum
4679 				 * documented in the errata docs. It fixes an
4680 				 * issue where error-prone or unreliable PCIe
4681 				 * completions occur, particularly with ASPM
4682 				 * enabled. Without the fix, the issue can
4683 				 * cause Tx timeouts.
4684 				 */
4685 				reg = CSR_READ(sc, WMREG_GCR2);
4686 				reg |= __BIT(0);
4687 				CSR_WRITE(sc, WMREG_GCR2, reg);
4688 			}
4689 			break;
4690 		case WM_T_80003:
4691 			/* TARC0 */
4692 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4693 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4694 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4695 
4696 			/* TARC1 bit 28 */
4697 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4698 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4699 				tarc1 &= ~__BIT(28);
4700 			else
4701 				tarc1 |= __BIT(28);
4702 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4703 			break;
4704 		case WM_T_ICH8:
4705 		case WM_T_ICH9:
4706 		case WM_T_ICH10:
4707 		case WM_T_PCH:
4708 		case WM_T_PCH2:
4709 		case WM_T_PCH_LPT:
4710 		case WM_T_PCH_SPT:
4711 		case WM_T_PCH_CNP:
4712 			/* TARC0 */
4713 			if (sc->sc_type == WM_T_ICH8) {
4714 				/* Set TARC0 bits 29 and 28 */
4715 				tarc0 |= __BITS(29, 28);
4716 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4717 				tarc0 |= __BIT(29);
4718 				/*
4719 				 * Drop bit 28. From Linux.
4720 				 * See I218/I219 spec update
4721 				 * "5. Buffer Overrun While the I219 is
4722 				 * Processing DMA Transactions"
4723 				 */
4724 				tarc0 &= ~__BIT(28);
4725 			}
4726 			/* Set TARC0 bits 23,24,26,27 */
4727 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4728 
4729 			/* CTRL_EXT */
4730 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4731 			reg |= __BIT(22);	/* Set bit 22 */
4732 			/*
4733 			 * Enable PHY low-power state when MAC is at D3
4734 			 * w/o WoL
4735 			 */
4736 			if (sc->sc_type >= WM_T_PCH)
4737 				reg |= CTRL_EXT_PHYPDEN;
4738 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4739 
4740 			/* TARC1 */
4741 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4742 			/* bit 28 */
4743 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4744 				tarc1 &= ~__BIT(28);
4745 			else
4746 				tarc1 |= __BIT(28);
4747 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4748 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4749 
4750 			/* Device Status */
4751 			if (sc->sc_type == WM_T_ICH8) {
4752 				reg = CSR_READ(sc, WMREG_STATUS);
4753 				reg &= ~__BIT(31);
4754 				CSR_WRITE(sc, WMREG_STATUS, reg);
4755 
4756 			}
4757 
4758 			/* IOSFPC */
4759 			if (sc->sc_type == WM_T_PCH_SPT) {
4760 				reg = CSR_READ(sc, WMREG_IOSFPC);
4761 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
4762 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4763 			}
4764 			/*
4765 			 * To work around a descriptor data corruption issue
4766 			 * seen with NFS v2 UDP traffic, simply disable the
4767 			 * NFS filtering capability.
4768 			 */
4769 			reg = CSR_READ(sc, WMREG_RFCTL);
4770 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4771 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4772 			break;
4773 		default:
4774 			break;
4775 		}
4776 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4777 
4778 		switch (sc->sc_type) {
4779 		/*
4780 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4781 		 * Avoid RSS Hash Value bug.
4782 		 */
4783 		case WM_T_82571:
4784 		case WM_T_82572:
4785 		case WM_T_82573:
4786 		case WM_T_80003:
4787 		case WM_T_ICH8:
4788 			reg = CSR_READ(sc, WMREG_RFCTL);
4789 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
4790 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4791 			break;
4792 		case WM_T_82574:
4793 			/* Use extended Rx descriptors. */
4794 			reg = CSR_READ(sc, WMREG_RFCTL);
4795 			reg |= WMREG_RFCTL_EXSTEN;
4796 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4797 			break;
4798 		default:
4799 			break;
4800 		}
4801 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4802 		/*
4803 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4804 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4805 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4806 		 * Correctly by the Device"
4807 		 *
4808 		 * I354(C2000) Errata AVR53:
4809 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4810 		 * Hang"
4811 		 */
4812 		reg = CSR_READ(sc, WMREG_RFCTL);
4813 		reg |= WMREG_RFCTL_IPV6EXDIS;
4814 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4815 	}
4816 }
4817 
4818 static uint32_t
4819 wm_rxpbs_adjust_82580(uint32_t val)
4820 {
4821 	uint32_t rv = 0;
4822 
4823 	if (val < __arraycount(wm_82580_rxpbs_table))
4824 		rv = wm_82580_rxpbs_table[val];
4825 
4826 	return rv;
4827 }
4828 
4829 /*
4830  * wm_reset_phy:
4831  *
4832  *	generic PHY reset function.
4833  *	Same as e1000_phy_hw_reset_generic()
4834  */
4835 static int
4836 wm_reset_phy(struct wm_softc *sc)
4837 {
4838 	uint32_t reg;
4839 
4840 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4841 		device_xname(sc->sc_dev), __func__));
4842 	if (wm_phy_resetisblocked(sc))
4843 		return -1;
4844 
4845 	sc->phy.acquire(sc);
4846 
4847 	reg = CSR_READ(sc, WMREG_CTRL);
4848 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4849 	CSR_WRITE_FLUSH(sc);
4850 
4851 	delay(sc->phy.reset_delay_us);
4852 
4853 	CSR_WRITE(sc, WMREG_CTRL, reg);
4854 	CSR_WRITE_FLUSH(sc);
4855 
4856 	delay(150);
4857 
4858 	sc->phy.release(sc);
4859 
4860 	wm_get_cfg_done(sc);
4861 	wm_phy_post_reset(sc);
4862 
4863 	return 0;
4864 }
4865 
4866 /*
4867  * wm_flush_desc_rings - remove all descriptors from the descriptor rings.
4868  *
4869  * In i219, the descriptor rings must be emptied before resetting the HW
4870  * or before changing the device state to D3 during runtime (runtime PM).
4871  *
4872  * Failure to do this will cause the HW to enter a unit hang state which can
4873  * only be released by PCI reset on the device.
4874  *
4875  * I219 does not use multiqueue, so it is enough to check sc->sc_queue[0] only.
4876  */
4877 static void
4878 wm_flush_desc_rings(struct wm_softc *sc)
4879 {
4880 	pcireg_t preg;
4881 	uint32_t reg;
4882 	struct wm_txqueue *txq;
4883 	wiseman_txdesc_t *txd;
4884 	int nexttx;
4885 	uint32_t rctl;
4886 
4887 	/* First, disable MULR fix in FEXTNVM11 */
4888 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4889 	reg |= FEXTNVM11_DIS_MULRFIX;
4890 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4891 
4892 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4893 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4894 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4895 		return;
4896 
4897 	/*
4898 	 * Remove all descriptors from the tx_ring.
4899 	 *
4900 	 * We want to clear all pending descriptors from the TX ring. Zeroing
4901 	 * happens when the HW reads the regs. We assign the ring itself as
4902 	 * the data of the next descriptor. We don't care about the data;
4903 	 * we are about to reset the HW.
4904 	 */
4905 #ifdef WM_DEBUG
4906 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x)\n", preg);
4907 #endif
4908 	reg = CSR_READ(sc, WMREG_TCTL);
4909 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4910 
4911 	txq = &sc->sc_queue[0].wmq_txq;
4912 	nexttx = txq->txq_next;
4913 	txd = &txq->txq_descs[nexttx];
4914 	wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
4915 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4916 	txd->wtx_fields.wtxu_status = 0;
4917 	txd->wtx_fields.wtxu_options = 0;
4918 	txd->wtx_fields.wtxu_vlan = 0;
4919 
4920 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4921 	    BUS_SPACE_BARRIER_WRITE);
4922 
4923 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4924 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4925 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4926 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4927 	delay(250);
4928 
4929 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4930 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4931 		return;
4932 
4933 	/*
4934 	 * Mark all descriptors in the RX ring as consumed and disable the
4935 	 * rx ring.
4936 	 */
4937 #ifdef WM_DEBUG
4938 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4939 #endif
4940 	rctl = CSR_READ(sc, WMREG_RCTL);
4941 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4942 	CSR_WRITE_FLUSH(sc);
4943 	delay(150);
4944 
4945 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4946 	/* Zero the lower 14 bits (prefetch and host thresholds) */
4947 	reg &= 0xffffc000;
4948 	/*
4949 	 * Update thresholds: prefetch threshold to 31, host threshold
4950 	 * to 1 and make sure the granularity is "descriptors" and not
4951 	 * "cache lines"
4952 	 */
4953 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4954 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4955 
4956 	/* Momentarily enable the RX ring for the changes to take effect */
4957 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4958 	CSR_WRITE_FLUSH(sc);
4959 	delay(150);
4960 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4961 }
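
/*
 * For reference, the RXDCTL value assembled in the RX flush above
 * decodes as follows (a sketch of the fields as used here):
 *
 *	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
 *		0x1f        PTHRESH = 31 (prefetch threshold, bits 5:0)
 *		(1 << 8)    HTHRESH = 1  (host threshold, bits 13:8)
 *		RXDCTL_GRAN granularity = descriptors, not cache lines
 *
 * which matches the "zero the lower 14 bits" mask 0xffffc000.
 */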
4962 
4963 /*
4964  * wm_reset:
4965  *
4966  *	Reset the i82542 chip.
4967  */
4968 static void
4969 wm_reset(struct wm_softc *sc)
4970 {
4971 	int phy_reset = 0;
4972 	int i, error = 0;
4973 	uint32_t reg;
4974 	uint16_t kmreg;
4975 	int rv;
4976 
4977 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4978 		device_xname(sc->sc_dev), __func__));
4979 	KASSERT(sc->sc_type != 0);
4980 
4981 	/*
4982 	 * Allocate on-chip memory according to the MTU size.
4983 	 * The Packet Buffer Allocation register must be written
4984 	 * before the chip is reset.
4985 	 */
4986 	switch (sc->sc_type) {
4987 	case WM_T_82547:
4988 	case WM_T_82547_2:
4989 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4990 		    PBA_22K : PBA_30K;
4991 		for (i = 0; i < sc->sc_nqueues; i++) {
4992 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4993 			txq->txq_fifo_head = 0;
4994 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4995 			txq->txq_fifo_size =
4996 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4997 			txq->txq_fifo_stall = 0;
4998 		}
4999 		break;
5000 	case WM_T_82571:
5001 	case WM_T_82572:
5002 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
5003 	case WM_T_80003:
5004 		sc->sc_pba = PBA_32K;
5005 		break;
5006 	case WM_T_82573:
5007 		sc->sc_pba = PBA_12K;
5008 		break;
5009 	case WM_T_82574:
5010 	case WM_T_82583:
5011 		sc->sc_pba = PBA_20K;
5012 		break;
5013 	case WM_T_82576:
5014 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
5015 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
5016 		break;
5017 	case WM_T_82580:
5018 	case WM_T_I350:
5019 	case WM_T_I354:
5020 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
5021 		break;
5022 	case WM_T_I210:
5023 	case WM_T_I211:
5024 		sc->sc_pba = PBA_34K;
5025 		break;
5026 	case WM_T_ICH8:
5027 		/* Workaround for a bit corruption issue in FIFO memory */
5028 		sc->sc_pba = PBA_8K;
5029 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
5030 		break;
5031 	case WM_T_ICH9:
5032 	case WM_T_ICH10:
5033 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
5034 		    PBA_14K : PBA_10K;
5035 		break;
5036 	case WM_T_PCH:
5037 	case WM_T_PCH2:	/* XXX 14K? */
5038 	case WM_T_PCH_LPT:
5039 	case WM_T_PCH_SPT:
5040 	case WM_T_PCH_CNP:
5041 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
5042 		    PBA_12K : PBA_26K;
5043 		break;
5044 	default:
5045 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
5046 		    PBA_40K : PBA_48K;
5047 		break;
5048 	}
5049 	/*
5050 	 * Only old or non-multiqueue devices have the PBA register
5051 	 * XXX Need special handling for 82575.
5052 	 */
5053 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5054 	    || (sc->sc_type == WM_T_82575))
5055 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
5056 
5057 	/* Prevent the PCI-E bus from sticking */
5058 	if (sc->sc_flags & WM_F_PCIE) {
5059 		int timeout = 800;
5060 
5061 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
5062 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5063 
5064 		while (timeout--) {
5065 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
5066 			    == 0)
5067 				break;
5068 			delay(100);
5069 		}
5070 		if (timeout == 0)
5071 			device_printf(sc->sc_dev,
5072 			    "failed to disable busmastering\n");
5073 	}
5074 
5075 	/* Set the completion timeout for interface */
5076 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5077 	    || (sc->sc_type == WM_T_82580)
5078 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5079 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5080 		wm_set_pcie_completion_timeout(sc);
5081 
5082 	/* Clear interrupt */
5083 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5084 	if (wm_is_using_msix(sc)) {
5085 		if (sc->sc_type != WM_T_82574) {
5086 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5087 			CSR_WRITE(sc, WMREG_EIAC, 0);
5088 		} else
5089 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5090 	}
5091 
5092 	/* Stop the transmit and receive processes. */
5093 	CSR_WRITE(sc, WMREG_RCTL, 0);
5094 	sc->sc_rctl &= ~RCTL_EN;
5095 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5096 	CSR_WRITE_FLUSH(sc);
5097 
5098 	/* XXX set_tbi_sbp_82543() */
5099 
5100 	delay(10*1000);
5101 
5102 	/* Must acquire the MDIO ownership before MAC reset */
5103 	switch (sc->sc_type) {
5104 	case WM_T_82573:
5105 	case WM_T_82574:
5106 	case WM_T_82583:
5107 		error = wm_get_hw_semaphore_82573(sc);
5108 		break;
5109 	default:
5110 		break;
5111 	}
5112 
5113 	/*
5114 	 * 82541 Errata 29? & 82547 Errata 28?
5115 	 * See also the description about PHY_RST bit in CTRL register
5116 	 * in 8254x_GBe_SDM.pdf.
5117 	 */
5118 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5119 		CSR_WRITE(sc, WMREG_CTRL,
5120 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5121 		CSR_WRITE_FLUSH(sc);
5122 		delay(5000);
5123 	}
5124 
5125 	switch (sc->sc_type) {
5126 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5127 	case WM_T_82541:
5128 	case WM_T_82541_2:
5129 	case WM_T_82547:
5130 	case WM_T_82547_2:
5131 		/*
5132 		 * On some chipsets, a reset through a memory-mapped write
5133 		 * cycle can cause the chip to reset before completing the
5134 		 * write cycle. This causes a major headache that can be avoided
5135 		 * by issuing the reset via indirect register writes through
5136 		 * I/O space.
5137 		 *
5138 		 * So, if we successfully mapped the I/O BAR at attach time,
5139 		 * use that. Otherwise, try our luck with a memory-mapped
5140 		 * reset.
5141 		 */
5142 		if (sc->sc_flags & WM_F_IOH_VALID)
5143 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5144 		else
5145 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5146 		break;
5147 	case WM_T_82545_3:
5148 	case WM_T_82546_3:
5149 		/* Use the shadow control register on these chips. */
5150 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5151 		break;
5152 	case WM_T_80003:
5153 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5154 		sc->phy.acquire(sc);
5155 		CSR_WRITE(sc, WMREG_CTRL, reg);
5156 		sc->phy.release(sc);
5157 		break;
5158 	case WM_T_ICH8:
5159 	case WM_T_ICH9:
5160 	case WM_T_ICH10:
5161 	case WM_T_PCH:
5162 	case WM_T_PCH2:
5163 	case WM_T_PCH_LPT:
5164 	case WM_T_PCH_SPT:
5165 	case WM_T_PCH_CNP:
5166 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5167 		if (wm_phy_resetisblocked(sc) == false) {
5168 			/*
5169 			 * Gate automatic PHY configuration by hardware on
5170 			 * non-managed 82579
5171 			 */
5172 			if ((sc->sc_type == WM_T_PCH2)
5173 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5174 				== 0))
5175 				wm_gate_hw_phy_config_ich8lan(sc, true);
5176 
5177 			reg |= CTRL_PHY_RESET;
5178 			phy_reset = 1;
5179 		} else
5180 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5181 		sc->phy.acquire(sc);
5182 		CSR_WRITE(sc, WMREG_CTRL, reg);
5183 		/* Don't insert a completion barrier when reset */
5184 		delay(20*1000);
5185 		mutex_exit(sc->sc_ich_phymtx);
5186 		break;
5187 	case WM_T_82580:
5188 	case WM_T_I350:
5189 	case WM_T_I354:
5190 	case WM_T_I210:
5191 	case WM_T_I211:
5192 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5193 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5194 			CSR_WRITE_FLUSH(sc);
5195 		delay(5000);
5196 		break;
5197 	case WM_T_82542_2_0:
5198 	case WM_T_82542_2_1:
5199 	case WM_T_82543:
5200 	case WM_T_82540:
5201 	case WM_T_82545:
5202 	case WM_T_82546:
5203 	case WM_T_82571:
5204 	case WM_T_82572:
5205 	case WM_T_82573:
5206 	case WM_T_82574:
5207 	case WM_T_82575:
5208 	case WM_T_82576:
5209 	case WM_T_82583:
5210 	default:
5211 		/* Everything else can safely use the documented method. */
5212 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5213 		break;
5214 	}
5215 
5216 	/* Must release the MDIO ownership after MAC reset */
5217 	switch (sc->sc_type) {
5218 	case WM_T_82573:
5219 	case WM_T_82574:
5220 	case WM_T_82583:
5221 		if (error == 0)
5222 			wm_put_hw_semaphore_82573(sc);
5223 		break;
5224 	default:
5225 		break;
5226 	}
5227 
5228 	/* Set Phy Config Counter to 50msec */
5229 	if (sc->sc_type == WM_T_PCH2) {
5230 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5231 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5232 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5233 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5234 	}
5235 
5236 	if (phy_reset != 0)
5237 		wm_get_cfg_done(sc);
5238 
5239 	/* Reload EEPROM */
5240 	switch (sc->sc_type) {
5241 	case WM_T_82542_2_0:
5242 	case WM_T_82542_2_1:
5243 	case WM_T_82543:
5244 	case WM_T_82544:
5245 		delay(10);
5246 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5247 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5248 		CSR_WRITE_FLUSH(sc);
5249 		delay(2000);
5250 		break;
5251 	case WM_T_82540:
5252 	case WM_T_82545:
5253 	case WM_T_82545_3:
5254 	case WM_T_82546:
5255 	case WM_T_82546_3:
5256 		delay(5*1000);
5257 		/* XXX Disable HW ARPs on ASF enabled adapters */
5258 		break;
5259 	case WM_T_82541:
5260 	case WM_T_82541_2:
5261 	case WM_T_82547:
5262 	case WM_T_82547_2:
5263 		delay(20000);
5264 		/* XXX Disable HW ARPs on ASF enabled adapters */
5265 		break;
5266 	case WM_T_82571:
5267 	case WM_T_82572:
5268 	case WM_T_82573:
5269 	case WM_T_82574:
5270 	case WM_T_82583:
5271 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5272 			delay(10);
5273 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5274 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5275 			CSR_WRITE_FLUSH(sc);
5276 		}
5277 		/* check EECD_EE_AUTORD */
5278 		wm_get_auto_rd_done(sc);
5279 		/*
5280 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
5281 		 * is set.
5282 		 */
5283 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5284 		    || (sc->sc_type == WM_T_82583))
5285 			delay(25*1000);
5286 		break;
5287 	case WM_T_82575:
5288 	case WM_T_82576:
5289 	case WM_T_82580:
5290 	case WM_T_I350:
5291 	case WM_T_I354:
5292 	case WM_T_I210:
5293 	case WM_T_I211:
5294 	case WM_T_80003:
5295 		/* check EECD_EE_AUTORD */
5296 		wm_get_auto_rd_done(sc);
5297 		break;
5298 	case WM_T_ICH8:
5299 	case WM_T_ICH9:
5300 	case WM_T_ICH10:
5301 	case WM_T_PCH:
5302 	case WM_T_PCH2:
5303 	case WM_T_PCH_LPT:
5304 	case WM_T_PCH_SPT:
5305 	case WM_T_PCH_CNP:
5306 		break;
5307 	default:
5308 		panic("%s: unknown type\n", __func__);
5309 	}
5310 
5311 	/* Check whether EEPROM is present or not */
5312 	switch (sc->sc_type) {
5313 	case WM_T_82575:
5314 	case WM_T_82576:
5315 	case WM_T_82580:
5316 	case WM_T_I350:
5317 	case WM_T_I354:
5318 	case WM_T_ICH8:
5319 	case WM_T_ICH9:
5320 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5321 			/* Not found */
5322 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5323 			if (sc->sc_type == WM_T_82575)
5324 				wm_reset_init_script_82575(sc);
5325 		}
5326 		break;
5327 	default:
5328 		break;
5329 	}
5330 
5331 	if (phy_reset != 0)
5332 		wm_phy_post_reset(sc);
5333 
5334 	if ((sc->sc_type == WM_T_82580)
5335 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5336 		/* Clear global device reset status bit */
5337 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5338 	}
5339 
5340 	/* Clear any pending interrupt events. */
5341 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5342 	reg = CSR_READ(sc, WMREG_ICR);
5343 	if (wm_is_using_msix(sc)) {
5344 		if (sc->sc_type != WM_T_82574) {
5345 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5346 			CSR_WRITE(sc, WMREG_EIAC, 0);
5347 		} else
5348 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5349 	}
5350 
5351 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5352 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5353 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5354 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5355 		reg = CSR_READ(sc, WMREG_KABGTXD);
5356 		reg |= KABGTXD_BGSQLBIAS;
5357 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5358 	}
5359 
5360 	/* Reload sc_ctrl */
5361 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5362 
5363 	wm_set_eee(sc);
5364 
5365 	/*
5366 	 * For PCH, this write will make sure that any noise will be detected
5367 	 * as a CRC error and be dropped rather than show up as a bad packet
5368 	 * to the DMA engine
5369 	 */
5370 	if (sc->sc_type == WM_T_PCH)
5371 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5372 
5373 	if (sc->sc_type >= WM_T_82544)
5374 		CSR_WRITE(sc, WMREG_WUC, 0);
5375 
5376 	if (sc->sc_type < WM_T_82575)
5377 		wm_disable_aspm(sc); /* Workaround for some chips */
5378 
5379 	wm_reset_mdicnfg_82580(sc);
5380 
5381 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5382 		wm_pll_workaround_i210(sc);
5383 
5384 	if (sc->sc_type == WM_T_80003) {
5385 		/* Default to TRUE to enable the MDIC W/A */
5386 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5387 
5388 		rv = wm_kmrn_readreg(sc,
5389 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5390 		if (rv == 0) {
5391 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5392 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5393 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5394 			else
5395 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5396 		}
5397 	}
5398 }
5399 
5400 /*
5401  * wm_add_rxbuf:
5402  *
5403  *	Add a receive buffer to the indicated descriptor.
5404  */
5405 static int
5406 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5407 {
5408 	struct wm_softc *sc = rxq->rxq_sc;
5409 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5410 	struct mbuf *m;
5411 	int error;
5412 
5413 	KASSERT(mutex_owned(rxq->rxq_lock));
5414 
5415 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5416 	if (m == NULL)
5417 		return ENOBUFS;
5418 
5419 	MCLGET(m, M_DONTWAIT);
5420 	if ((m->m_flags & M_EXT) == 0) {
5421 		m_freem(m);
5422 		return ENOBUFS;
5423 	}
5424 
5425 	if (rxs->rxs_mbuf != NULL)
5426 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5427 
5428 	rxs->rxs_mbuf = m;
5429 
5430 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5431 	/*
5432 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5433 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5434 	 */
5435 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5436 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5437 	if (error) {
5438 		/* XXX XXX XXX */
5439 		aprint_error_dev(sc->sc_dev,
5440 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5441 		panic("wm_add_rxbuf");
5442 	}
5443 
5444 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5445 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5446 
5447 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5448 		if ((sc->sc_rctl & RCTL_EN) != 0)
5449 			wm_init_rxdesc(rxq, idx);
5450 	} else
5451 		wm_init_rxdesc(rxq, idx);
5452 
5453 	return 0;
5454 }
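
/*
 * Caller sketch (assumption-level, error handling elided): the RX
 * interrupt path refills a consumed slot and, when mbufs run out,
 * recycles the old buffer by re-initializing the descriptor:
 *
 *	if (wm_add_rxbuf(rxq, idx) != 0)
 *		wm_init_rxdesc(rxq, idx);	(reuse the previous buffer)
 */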
5455 
5456 /*
5457  * wm_rxdrain:
5458  *
5459  *	Drain the receive queue.
5460  */
5461 static void
5462 wm_rxdrain(struct wm_rxqueue *rxq)
5463 {
5464 	struct wm_softc *sc = rxq->rxq_sc;
5465 	struct wm_rxsoft *rxs;
5466 	int i;
5467 
5468 	KASSERT(mutex_owned(rxq->rxq_lock));
5469 
5470 	for (i = 0; i < WM_NRXDESC; i++) {
5471 		rxs = &rxq->rxq_soft[i];
5472 		if (rxs->rxs_mbuf != NULL) {
5473 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5474 			m_freem(rxs->rxs_mbuf);
5475 			rxs->rxs_mbuf = NULL;
5476 		}
5477 	}
5478 }
5479 
5480 /*
5481  * Setup registers for RSS.
5482  *
5483  * XXX not yet VMDq support
5484  */
5485 static void
5486 wm_init_rss(struct wm_softc *sc)
5487 {
5488 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5489 	int i;
5490 
5491 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5492 
5493 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5494 		unsigned int qid, reta_ent;
5495 
5496 		qid  = i % sc->sc_nqueues;
5497 		switch (sc->sc_type) {
5498 		case WM_T_82574:
5499 			reta_ent = __SHIFTIN(qid,
5500 			    RETA_ENT_QINDEX_MASK_82574);
5501 			break;
5502 		case WM_T_82575:
5503 			reta_ent = __SHIFTIN(qid,
5504 			    RETA_ENT_QINDEX1_MASK_82575);
5505 			break;
5506 		default:
5507 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5508 			break;
5509 		}
5510 
5511 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5512 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5513 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5514 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5515 	}
5516 
5517 	rss_getkey((uint8_t *)rss_key);
5518 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5519 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5520 
5521 	if (sc->sc_type == WM_T_82574)
5522 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5523 	else
5524 		mrqc = MRQC_ENABLE_RSS_MQ;
5525 
5526 	/*
5527 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5528 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5529 	 */
5530 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5531 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5532 #if 0
5533 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5534 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5535 #endif
5536 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5537 
5538 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5539 }
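
/*
 * Worked example (assuming 128 RETA entries and sc_nqueues == 4): the
 * qid = i % sc_nqueues mapping above fills the redirection table
 * round-robin, so RSS hash values spread evenly over the queues:
 *
 *	RETA entry: 0 1 2 3 4 5 6 7 ...
 *	queue id:   0 1 2 3 0 1 2 3 ...
 */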
5540 
5541 /*
5542  * Adjust the TX and RX queue numbers which the system actually uses.
5543  *
5544  * The numbers are affected by the following parameters:
5545  *     - The number of hardware queues
5546  *     - The number of MSI-X vectors (= "nvectors" argument)
5547  *     - ncpu
5548  */
5549 static void
5550 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5551 {
5552 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5553 
5554 	if (nvectors < 2) {
5555 		sc->sc_nqueues = 1;
5556 		return;
5557 	}
5558 
5559 	switch (sc->sc_type) {
5560 	case WM_T_82572:
5561 		hw_ntxqueues = 2;
5562 		hw_nrxqueues = 2;
5563 		break;
5564 	case WM_T_82574:
5565 		hw_ntxqueues = 2;
5566 		hw_nrxqueues = 2;
5567 		break;
5568 	case WM_T_82575:
5569 		hw_ntxqueues = 4;
5570 		hw_nrxqueues = 4;
5571 		break;
5572 	case WM_T_82576:
5573 		hw_ntxqueues = 16;
5574 		hw_nrxqueues = 16;
5575 		break;
5576 	case WM_T_82580:
5577 	case WM_T_I350:
5578 	case WM_T_I354:
5579 		hw_ntxqueues = 8;
5580 		hw_nrxqueues = 8;
5581 		break;
5582 	case WM_T_I210:
5583 		hw_ntxqueues = 4;
5584 		hw_nrxqueues = 4;
5585 		break;
5586 	case WM_T_I211:
5587 		hw_ntxqueues = 2;
5588 		hw_nrxqueues = 2;
5589 		break;
5590 		/*
5591 		 * As the following Ethernet controllers do not support
5592 		 * MSI-X, this driver does not use multiqueue on them:
5593 		 *     - WM_T_80003
5594 		 *     - WM_T_ICH8
5595 		 *     - WM_T_ICH9
5596 		 *     - WM_T_ICH10
5597 		 *     - WM_T_PCH
5598 		 *     - WM_T_PCH2
5599 		 *     - WM_T_PCH_LPT
5600 		 */
5601 	default:
5602 		hw_ntxqueues = 1;
5603 		hw_nrxqueues = 1;
5604 		break;
5605 	}
5606 
5607 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5608 
5609 	/*
5610 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
5611 	 * the number of queues actually used.
5612 	 */
5613 	if (nvectors < hw_nqueues + 1)
5614 		sc->sc_nqueues = nvectors - 1;
5615 	else
5616 		sc->sc_nqueues = hw_nqueues;
5617 
5618 	/*
5619 	 * As more queues than CPUs cannot improve scaling, we limit
5620 	 * the number of queues actually used.
5621 	 */
5622 	if (ncpu < sc->sc_nqueues)
5623 		sc->sc_nqueues = ncpu;
5624 }
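
/*
 * Worked example: an 82576 (16 hardware TX/RX queue pairs) with
 * nvectors == 5 and ncpu == 8 gives
 *
 *	hw_nqueues = uimin(16, 16)  = 16
 *	sc_nqueues = nvectors - 1   = 4	(one vector is reserved for link)
 *	sc_nqueues = uimin(4, ncpu) = 4
 *
 * so four TX/RX queue pairs are actually used.
 */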
5625 
5626 static inline bool
5627 wm_is_using_msix(struct wm_softc *sc)
5628 {
5629 
5630 	return (sc->sc_nintrs > 1);
5631 }
5632 
5633 static inline bool
5634 wm_is_using_multiqueue(struct wm_softc *sc)
5635 {
5636 
5637 	return (sc->sc_nqueues > 1);
5638 }
5639 
5640 static int
5641 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
5642 {
5643 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5644 
5645 	wmq->wmq_id = qidx;
5646 	wmq->wmq_intr_idx = intr_idx;
5647 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
5648 	    wm_handle_queue, wmq);
5649 	if (wmq->wmq_si != NULL)
5650 		return 0;
5651 
5652 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5653 	    wmq->wmq_id);
5654 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5655 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5656 	return ENOMEM;
5657 }
5658 
5659 /*
5660  * Both single interrupt MSI and INTx can use this function.
5661  */
5662 static int
5663 wm_setup_legacy(struct wm_softc *sc)
5664 {
5665 	pci_chipset_tag_t pc = sc->sc_pc;
5666 	const char *intrstr = NULL;
5667 	char intrbuf[PCI_INTRSTR_LEN];
5668 	int error;
5669 
5670 	error = wm_alloc_txrx_queues(sc);
5671 	if (error) {
5672 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5673 		    error);
5674 		return ENOMEM;
5675 	}
5676 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5677 	    sizeof(intrbuf));
5678 #ifdef WM_MPSAFE
5679 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5680 #endif
5681 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5682 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5683 	if (sc->sc_ihs[0] == NULL) {
5684 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5685 		    (pci_intr_type(pc, sc->sc_intrs[0])
5686 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5687 		return ENOMEM;
5688 	}
5689 
5690 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5691 	sc->sc_nintrs = 1;
5692 
5693 	return wm_softint_establish_queue(sc, 0, 0);
5694 }
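
/*
 * Design note: with a single MSI/INTx vector, queue and link events
 * share vector 0, so wm_softint_establish_queue(sc, 0, 0) ties the
 * only queue's softint to that vector.
 */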
5695 
5696 static int
5697 wm_setup_msix(struct wm_softc *sc)
5698 {
5699 	void *vih;
5700 	kcpuset_t *affinity;
5701 	int qidx, error, intr_idx, txrx_established;
5702 	pci_chipset_tag_t pc = sc->sc_pc;
5703 	const char *intrstr = NULL;
5704 	char intrbuf[PCI_INTRSTR_LEN];
5705 	char intr_xname[INTRDEVNAMEBUF];
5706 
5707 	if (sc->sc_nqueues < ncpu) {
5708 		/*
5709 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5710 		 * interrupts starts from CPU#1.
5711 		 */
5712 		sc->sc_affinity_offset = 1;
5713 	} else {
5714 		/*
5715 		 * In this case, this device uses all CPUs. So, we unify the
5716 		 * affinitized cpu_index with the MSI-X vector number for readability.
5717 		 */
5718 		sc->sc_affinity_offset = 0;
5719 	}
5720 
5721 	error = wm_alloc_txrx_queues(sc);
5722 	if (error) {
5723 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5724 		    error);
5725 		return ENOMEM;
5726 	}
5727 
5728 	kcpuset_create(&affinity, false);
5729 	intr_idx = 0;
5730 
5731 	/*
5732 	 * TX and RX
5733 	 */
5734 	txrx_established = 0;
5735 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5736 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5737 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5738 
5739 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5740 		    sizeof(intrbuf));
5741 #ifdef WM_MPSAFE
5742 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5743 		    PCI_INTR_MPSAFE, true);
5744 #endif
5745 		memset(intr_xname, 0, sizeof(intr_xname));
5746 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5747 		    device_xname(sc->sc_dev), qidx);
5748 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5749 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5750 		if (vih == NULL) {
5751 			aprint_error_dev(sc->sc_dev,
5752 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5753 			    intrstr ? " at " : "",
5754 			    intrstr ? intrstr : "");
5755 
5756 			goto fail;
5757 		}
5758 		kcpuset_zero(affinity);
5759 		/* Round-robin affinity */
5760 		kcpuset_set(affinity, affinity_to);
5761 		error = interrupt_distribute(vih, affinity, NULL);
5762 		if (error == 0) {
5763 			aprint_normal_dev(sc->sc_dev,
5764 			    "for TX and RX interrupting at %s affinity to %u\n",
5765 			    intrstr, affinity_to);
5766 		} else {
5767 			aprint_normal_dev(sc->sc_dev,
5768 			    "for TX and RX interrupting at %s\n", intrstr);
5769 		}
5770 		sc->sc_ihs[intr_idx] = vih;
5771 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
5772 			goto fail;
5773 		txrx_established++;
5774 		intr_idx++;
5775 	}
5776 
5777 	/* LINK */
5778 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5779 	    sizeof(intrbuf));
5780 #ifdef WM_MPSAFE
5781 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5782 #endif
5783 	memset(intr_xname, 0, sizeof(intr_xname));
5784 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5785 	    device_xname(sc->sc_dev));
5786 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5787 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5788 	if (vih == NULL) {
5789 		aprint_error_dev(sc->sc_dev,
5790 		    "unable to establish MSI-X(for LINK)%s%s\n",
5791 		    intrstr ? " at " : "",
5792 		    intrstr ? intrstr : "");
5793 
5794 		goto fail;
5795 	}
5796 	/* Keep default affinity to LINK interrupt */
5797 	aprint_normal_dev(sc->sc_dev,
5798 	    "for LINK interrupting at %s\n", intrstr);
5799 	sc->sc_ihs[intr_idx] = vih;
5800 	sc->sc_link_intr_idx = intr_idx;
5801 
5802 	sc->sc_nintrs = sc->sc_nqueues + 1;
5803 	kcpuset_destroy(affinity);
5804 	return 0;
5805 
5806  fail:
5807 	for (qidx = 0; qidx < txrx_established; qidx++) {
5808 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5809 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5810 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5811 	}
5812 
5813 	kcpuset_destroy(affinity);
5814 	return ENOMEM;
5815 }
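
/*
 * Resulting vector layout (illustration for sc_nqueues == 4 with
 * sc_affinity_offset == 1):
 *
 *	vectors 0..3: TXRX0..TXRX3, bound round-robin to CPU1..CPU4
 *	vector 4:     LINK, left at the default affinity
 *
 * hence sc_nintrs == sc_nqueues + 1 == 5.
 */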
5816 
5817 static void
5818 wm_unset_stopping_flags(struct wm_softc *sc)
5819 {
5820 	int i;
5821 
5822 	KASSERT(WM_CORE_LOCKED(sc));
5823 
5824 	/* Must unset stopping flags in ascending order. */
5825 	for (i = 0; i < sc->sc_nqueues; i++) {
5826 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5827 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5828 
5829 		mutex_enter(txq->txq_lock);
5830 		txq->txq_stopping = false;
5831 		mutex_exit(txq->txq_lock);
5832 
5833 		mutex_enter(rxq->rxq_lock);
5834 		rxq->rxq_stopping = false;
5835 		mutex_exit(rxq->rxq_lock);
5836 	}
5837 
5838 	sc->sc_core_stopping = false;
5839 }
5840 
5841 static void
5842 wm_set_stopping_flags(struct wm_softc *sc)
5843 {
5844 	int i;
5845 
5846 	KASSERT(WM_CORE_LOCKED(sc));
5847 
5848 	sc->sc_core_stopping = true;
5849 
5850 	/* Must set stopping flags in ascending order. */
5851 	for (i = 0; i < sc->sc_nqueues; i++) {
5852 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5853 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5854 
5855 		mutex_enter(rxq->rxq_lock);
5856 		rxq->rxq_stopping = true;
5857 		mutex_exit(rxq->rxq_lock);
5858 
5859 		mutex_enter(txq->txq_lock);
5860 		txq->txq_stopping = true;
5861 		mutex_exit(txq->txq_lock);
5862 	}
5863 }
5864 
5865 /*
5866  * Write interrupt interval value to ITR or EITR
5867  */
5868 static void
5869 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5870 {
5871 
5872 	if (!wmq->wmq_set_itr)
5873 		return;
5874 
5875 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5876 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5877 
5878 		/*
5879 		 * 82575 doesn't have the CNT_INGR field.
5880 		 * So, overwrite the counter field in software.
5881 		 */
5882 		if (sc->sc_type == WM_T_82575)
5883 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5884 		else
5885 			eitr |= EITR_CNT_INGR;
5886 
5887 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5888 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5889 		/*
5890 		 * 82574 has both ITR and EITR. Set EITR when we use
5891 		 * the multiqueue function with MSI-X.
5892 		 */
5893 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5894 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5895 	} else {
5896 		KASSERT(wmq->wmq_id == 0);
5897 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5898 	}
5899 
5900 	wmq->wmq_set_itr = false;
5901 }
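
/*
 * Example of the NEWQUEUE branch above (a sketch): with wmq_itr == 450,
 *
 *	eitr = __SHIFTIN(450, EITR_ITR_INT_MASK) | EITR_CNT_INGR;
 *
 * i.e. roughly 1,000,000 / 450 ~= 2222 interrupts/sec by the EITR
 * convention described in wm_init_locked().
 */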
5902 
5903 /*
5904  * TODO
5905  * The dynamic itr calculation below is almost the same as in Linux igb;
5906  * however, it does not fit wm(4). So, AIM is kept disabled until we find
5907  * an appropriate itr calculation.
5908  */
5909 /*
5910  * Calculate the interrupt interval value to be written to the register in
5911  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
5912  */
5913 static void
5914 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5915 {
5916 #ifdef NOTYET
5917 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5918 	struct wm_txqueue *txq = &wmq->wmq_txq;
5919 	uint32_t avg_size = 0;
5920 	uint32_t new_itr;
5921 
5922 	if (rxq->rxq_packets)
5923 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5924 	if (txq->txq_packets)
5925 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5926 
5927 	if (avg_size == 0) {
5928 		new_itr = 450; /* restore default value */
5929 		goto out;
5930 	}
5931 
5932 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5933 	avg_size += 24;
5934 
5935 	/* Don't starve jumbo frames */
5936 	avg_size = uimin(avg_size, 3000);
5937 
5938 	/* Give a little boost to mid-size frames */
5939 	if ((avg_size > 300) && (avg_size < 1200))
5940 		new_itr = avg_size / 3;
5941 	else
5942 		new_itr = avg_size / 2;
5943 
5944 out:
5945 	/*
5946 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
5947 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5948 	 */
5949 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5950 		new_itr *= 4;
5951 
5952 	if (new_itr != wmq->wmq_itr) {
5953 		wmq->wmq_itr = new_itr;
5954 		wmq->wmq_set_itr = true;
5955 	} else
5956 		wmq->wmq_set_itr = false;
5957 
5958 	rxq->rxq_packets = 0;
5959 	rxq->rxq_bytes = 0;
5960 	txq->txq_packets = 0;
5961 	txq->txq_bytes = 0;
5962 #endif
5963 }
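
/*
 * Worked example of the NOTYET path above, were it enabled: an average
 * frame of 576 bytes gives avg_size = 576 + 24 = 600; since
 * 300 < 600 < 1200, new_itr = 600 / 3 = 200, then, except on the 82575,
 * scaled by 4, so new_itr = 800.
 */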
5964 
5965 static void
5966 wm_init_sysctls(struct wm_softc *sc)
5967 {
5968 	struct sysctllog **log;
5969 	const struct sysctlnode *rnode, *qnode, *cnode;
5970 	int i, rv;
5971 	const char *dvname;
5972 
5973 	log = &sc->sc_sysctllog;
5974 	dvname = device_xname(sc->sc_dev);
5975 
5976 	rv = sysctl_createv(log, 0, NULL, &rnode,
5977 	    0, CTLTYPE_NODE, dvname,
5978 	    SYSCTL_DESCR("wm information and settings"),
5979 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5980 	if (rv != 0)
5981 		goto err;
5982 
5983 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5984 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5985 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5986 	if (rv != 0)
5987 		goto teardown;
5988 
5989 	for (i = 0; i < sc->sc_nqueues; i++) {
5990 		struct wm_queue *wmq = &sc->sc_queue[i];
5991 		struct wm_txqueue *txq = &wmq->wmq_txq;
5992 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5993 
5994 		snprintf(sc->sc_queue[i].sysctlname,
5995 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
5996 
5997 		if (sysctl_createv(log, 0, &rnode, &qnode,
5998 		    0, CTLTYPE_NODE,
5999 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
6000 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
6001 			break;
6002 
6003 		if (sysctl_createv(log, 0, &qnode, &cnode,
6004 		    CTLFLAG_READONLY, CTLTYPE_INT,
6005 		    "txq_free", SYSCTL_DESCR("TX queue free"),
6006 		    NULL, 0, &txq->txq_free,
6007 		    0, CTL_CREATE, CTL_EOL) != 0)
6008 			break;
6009 		if (sysctl_createv(log, 0, &qnode, &cnode,
6010 		    CTLFLAG_READONLY, CTLTYPE_INT,
6011 		    "txd_head", SYSCTL_DESCR("TX descriptor head"),
6012 		    wm_sysctl_tdh_handler, 0, (void *)txq,
6013 		    0, CTL_CREATE, CTL_EOL) != 0)
6014 			break;
6015 		if (sysctl_createv(log, 0, &qnode, &cnode,
6016 		    CTLFLAG_READONLY, CTLTYPE_INT,
6017 		    "txd_tail", SYSCTL_DESCR("TX descriptor tail"),
6018 		    wm_sysctl_tdt_handler, 0, (void *)txq,
6019 		    0, CTL_CREATE, CTL_EOL) != 0)
6020 			break;
6021 		if (sysctl_createv(log, 0, &qnode, &cnode,
6022 		    CTLFLAG_READONLY, CTLTYPE_INT,
6023 		    "txq_next", SYSCTL_DESCR("TX queue next"),
6024 		    NULL, 0, &txq->txq_next,
6025 		    0, CTL_CREATE, CTL_EOL) != 0)
6026 			break;
6027 		if (sysctl_createv(log, 0, &qnode, &cnode,
6028 		    CTLFLAG_READONLY, CTLTYPE_INT,
6029 		    "txq_sfree", SYSCTL_DESCR("TX queue sfree"),
6030 		    NULL, 0, &txq->txq_sfree,
6031 		    0, CTL_CREATE, CTL_EOL) != 0)
6032 			break;
6033 		if (sysctl_createv(log, 0, &qnode, &cnode,
6034 		    CTLFLAG_READONLY, CTLTYPE_INT,
6035 		    "txq_snext", SYSCTL_DESCR("TX queue snext"),
6036 		    NULL, 0, &txq->txq_snext,
6037 		    0, CTL_CREATE, CTL_EOL) != 0)
6038 			break;
6039 		if (sysctl_createv(log, 0, &qnode, &cnode,
6040 		    CTLFLAG_READONLY, CTLTYPE_INT,
6041 		    "txq_sdirty", SYSCTL_DESCR("TX queue sdirty"),
6042 		    NULL, 0, &txq->txq_sdirty,
6043 		    0, CTL_CREATE, CTL_EOL) != 0)
6044 			break;
6045 		if (sysctl_createv(log, 0, &qnode, &cnode,
6046 		    CTLFLAG_READONLY, CTLTYPE_INT,
6047 		    "txq_flags", SYSCTL_DESCR("TX queue flags"),
6048 		    NULL, 0, &txq->txq_flags,
6049 		    0, CTL_CREATE, CTL_EOL) != 0)
6050 			break;
6051 		if (sysctl_createv(log, 0, &qnode, &cnode,
6052 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6053 		    "txq_stopping", SYSCTL_DESCR("TX queue stopping"),
6054 		    NULL, 0, &txq->txq_stopping,
6055 		    0, CTL_CREATE, CTL_EOL) != 0)
6056 			break;
6057 		if (sysctl_createv(log, 0, &qnode, &cnode,
6058 		    CTLFLAG_READONLY, CTLTYPE_BOOL,
6059 		    "txq_sending", SYSCTL_DESCR("TX queue sending"),
6060 		    NULL, 0, &txq->txq_sending,
6061 		    0, CTL_CREATE, CTL_EOL) != 0)
6062 			break;
6063 
6064 		if (sysctl_createv(log, 0, &qnode, &cnode,
6065 		    CTLFLAG_READONLY, CTLTYPE_INT,
6066 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
6067 		    NULL, 0, &rxq->rxq_ptr,
6068 		    0, CTL_CREATE, CTL_EOL) != 0)
6069 			break;
6070 	}
6071 
6072 #ifdef WM_DEBUG
6073 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
6074 	    CTLTYPE_INT, "debug_flags",
6075 	    SYSCTL_DESCR(
6076 		    "Debug flags:\n"	\
6077 		    "\t0x01 LINK\n"	\
6078 		    "\t0x02 TX\n"	\
6079 		    "\t0x04 RX\n"	\
6080 		    "\t0x08 GMII\n"	\
6081 		    "\t0x10 MANAGE\n"	\
6082 		    "\t0x20 NVM\n"	\
6083 		    "\t0x40 INIT\n"	\
6084 		    "\t0x80 LOCK"),
6085 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
6086 	if (rv != 0)
6087 		goto teardown;
6088 #endif
6089 
6090 	return;
6091 
6092 teardown:
6093 	sysctl_teardown(log);
6094 err:
6095 	sc->sc_sysctllog = NULL;
6096 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
6097 	    __func__, rv);
6098 }
6099 
6100 /*
6101  * wm_init:		[ifnet interface function]
6102  *
6103  *	Initialize the interface.
6104  */
6105 static int
6106 wm_init(struct ifnet *ifp)
6107 {
6108 	struct wm_softc *sc = ifp->if_softc;
6109 	int ret;
6110 
6111 	WM_CORE_LOCK(sc);
6112 	ret = wm_init_locked(ifp);
6113 	WM_CORE_UNLOCK(sc);
6114 
6115 	return ret;
6116 }
6117 
6118 static int
6119 wm_init_locked(struct ifnet *ifp)
6120 {
6121 	struct wm_softc *sc = ifp->if_softc;
6122 	struct ethercom *ec = &sc->sc_ethercom;
6123 	int i, j, trynum, error = 0;
6124 	uint32_t reg, sfp_mask = 0;
6125 
6126 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6127 		device_xname(sc->sc_dev), __func__));
6128 	KASSERT(WM_CORE_LOCKED(sc));
6129 
6130 	/*
6131 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
6132 	 * There is a small but measurable benefit to avoiding the adjustment
6133 	 * of the descriptor so that the headers are aligned, for normal mtu,
6134 	 * on such platforms.  One possibility is that the DMA itself is
6135 	 * slightly more efficient if the front of the entire packet (instead
6136 	 * of the front of the headers) is aligned.
6137 	 *
6138 	 * Note we must always set align_tweak to 0 if we are using
6139 	 * jumbo frames.
6140 	 */
6141 #ifdef __NO_STRICT_ALIGNMENT
6142 	sc->sc_align_tweak = 0;
6143 #else
6144 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6145 		sc->sc_align_tweak = 0;
6146 	else
6147 		sc->sc_align_tweak = 2;
6148 #endif /* __NO_STRICT_ALIGNMENT */
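
	/*
	 * Worked example: with align_tweak == 2, the 14-byte Ethernet
	 * header ends at offset 2 + 14 = 16, so the IP header starts
	 * 4-byte aligned; with jumbo frames the frame would no longer
	 * fit in MCLBYTES - 2, hence the tweak must be 0.
	 */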
6149 
6150 	/* Cancel any pending I/O. */
6151 	wm_stop_locked(ifp, false, false);
6152 
6153 	/* Update statistics before reset */
6154 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6155 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6156 
6157 	/* >= PCH_SPT hardware workaround before reset. */
6158 	if (sc->sc_type >= WM_T_PCH_SPT)
6159 		wm_flush_desc_rings(sc);
6160 
6161 	/* Reset the chip to a known state. */
6162 	wm_reset(sc);
6163 
6164 	/*
6165 	 * AMT based hardware can now take control from firmware
6166 	 * Do this after reset.
6167 	 */
6168 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6169 		wm_get_hw_control(sc);
6170 
6171 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
6172 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6173 		wm_legacy_irq_quirk_spt(sc);
6174 
6175 	/* Init hardware bits */
6176 	wm_initialize_hardware_bits(sc);
6177 
6178 	/* Reset the PHY. */
6179 	if (sc->sc_flags & WM_F_HAS_MII)
6180 		wm_gmii_reset(sc);
6181 
6182 	if (sc->sc_type >= WM_T_ICH8) {
6183 		reg = CSR_READ(sc, WMREG_GCR);
6184 		/*
6185 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6186 		 * default after reset.
6187 		 */
6188 		if (sc->sc_type == WM_T_ICH8)
6189 			reg |= GCR_NO_SNOOP_ALL;
6190 		else
6191 			reg &= ~GCR_NO_SNOOP_ALL;
6192 		CSR_WRITE(sc, WMREG_GCR, reg);
6193 	}
6194 
6195 	if ((sc->sc_type >= WM_T_ICH8)
6196 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6197 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6198 
6199 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6200 		reg |= CTRL_EXT_RO_DIS;
6201 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6202 	}
6203 
6204 	/* Calculate (E)ITR value */
6205 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6206 		/*
6207 		 * For NEWQUEUE's EITR (except for 82575).
6208 		 * 82575's EITR should be set same throttling value as other
6209 		 * 82575's EITR should be set to the same throttling value as
6210 		 * other old controllers' ITR because the interrupt/sec
6211 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
6212 		 *
6213 		 * 82574's EITR should be set to the same throttling value as ITR.
6214 		 * For N interrupts/sec, set this value to:
6215 		 * 1,000,000 / N in contrast to ITR throttling value.
6216 		 */
6217 		sc->sc_itr_init = 450;
6218 	} else if (sc->sc_type >= WM_T_82543) {
6219 		/*
6220 		 * Set up the interrupt throttling register (units of 256ns)
6221 		 * Note that a footnote in Intel's documentation says this
6222 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6223 		 * or 10Mbit mode.  Empirically, it appears to be the case
6224 		 * that that is also true for the 1024ns units of the other
6225 		 * interrupt-related timer registers -- so, really, we ought
6226 		 * to divide this value by 4 when the link speed is low.
6227 		 *
6228 		 * XXX implement this division at link speed change!
6229 		 */
6230 
6231 		/*
6232 		 * For N interrupts/sec, set this value to:
6233 		 * 1,000,000,000 / (N * 256).  Note that we set the
6234 		 * absolute and packet timer values to this value
6235 		 * divided by 4 to get "simple timer" behavior.
6236 		 */
6237 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
6238 	}
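
	/*
	 * Worked example of the ITR formula: sc_itr_init == 1500 gives
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, matching
	 * the comment above; the EITR default of 450 corresponds to
	 * about 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */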
6239 
6240 	error = wm_init_txrx_queues(sc);
6241 	if (error)
6242 		goto out;
6243 
6244 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6245 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6246 	    (sc->sc_type >= WM_T_82575))
6247 		wm_serdes_power_up_link_82575(sc);
6248 
6249 	/* Clear out the VLAN table -- we don't use it (yet). */
6250 	CSR_WRITE(sc, WMREG_VET, 0);
6251 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6252 		trynum = 10; /* Due to hw errata */
6253 	else
6254 		trynum = 1;
6255 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
6256 		for (j = 0; j < trynum; j++)
6257 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6258 
6259 	/*
6260 	 * Set up flow-control parameters.
6261 	 *
6262 	 * XXX Values could probably stand some tuning.
6263 	 */
6264 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6265 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6266 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6267 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6268 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6269 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6270 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6271 	}
6272 
6273 	sc->sc_fcrtl = FCRTL_DFLT;
6274 	if (sc->sc_type < WM_T_82543) {
6275 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6276 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6277 	} else {
6278 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6279 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6280 	}
6281 
6282 	if (sc->sc_type == WM_T_80003)
6283 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6284 	else
6285 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6286 
6287 	/* Writes the control register. */
6288 	wm_set_vlan(sc);
6289 
6290 	if (sc->sc_flags & WM_F_HAS_MII) {
6291 		uint16_t kmreg;
6292 
6293 		switch (sc->sc_type) {
6294 		case WM_T_80003:
6295 		case WM_T_ICH8:
6296 		case WM_T_ICH9:
6297 		case WM_T_ICH10:
6298 		case WM_T_PCH:
6299 		case WM_T_PCH2:
6300 		case WM_T_PCH_LPT:
6301 		case WM_T_PCH_SPT:
6302 		case WM_T_PCH_CNP:
6303 			/*
6304 			 * Set the mac to wait the maximum time between each
6305 			 * iteration and increase the max iterations when
6306 			 * polling the phy; this fixes erroneous timeouts at
6307 			 * 10Mbps.
6308 			 */
6309 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6310 			    0xFFFF);
6311 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6312 			    &kmreg);
6313 			kmreg |= 0x3F;
6314 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6315 			    kmreg);
6316 			break;
6317 		default:
6318 			break;
6319 		}
6320 
6321 		if (sc->sc_type == WM_T_80003) {
6322 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6323 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
6324 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6325 
6326 			/* Bypass RX and TX FIFO's */
6327 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6328 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6329 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6330 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6331 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6332 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6333 		}
6334 	}
6335 #if 0
6336 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6337 #endif
6338 
6339 	/* Set up checksum offload parameters. */
6340 	reg = CSR_READ(sc, WMREG_RXCSUM);
6341 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6342 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6343 		reg |= RXCSUM_IPOFL;
6344 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6345 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6346 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6347 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6348 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
6349 
6350 	/* Set registers about MSI-X */
6351 	if (wm_is_using_msix(sc)) {
6352 		uint32_t ivar, qintr_idx;
6353 		struct wm_queue *wmq;
6354 		unsigned int qid;
6355 
6356 		if (sc->sc_type == WM_T_82575) {
6357 			/* Interrupt control */
6358 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6359 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6360 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6361 
6362 			/* TX and RX */
6363 			for (i = 0; i < sc->sc_nqueues; i++) {
6364 				wmq = &sc->sc_queue[i];
6365 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6366 				    EITR_TX_QUEUE(wmq->wmq_id)
6367 				    | EITR_RX_QUEUE(wmq->wmq_id));
6368 			}
6369 			/* Link status */
6370 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6371 			    EITR_OTHER);
6372 		} else if (sc->sc_type == WM_T_82574) {
6373 			/* Interrupt control */
6374 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6375 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6376 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6377 
6378 			/*
6379 			 * Work around an issue with spurious interrupts
6380 			 * in MSI-X mode.
6381 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
6382 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
6383 			 */
6384 			reg = CSR_READ(sc, WMREG_RFCTL);
6385 			reg |= WMREG_RFCTL_ACKDIS;
6386 			CSR_WRITE(sc, WMREG_RFCTL, reg);
6387 
6388 			ivar = 0;
6389 			/* TX and RX */
6390 			for (i = 0; i < sc->sc_nqueues; i++) {
6391 				wmq = &sc->sc_queue[i];
6392 				qid = wmq->wmq_id;
6393 				qintr_idx = wmq->wmq_intr_idx;
6394 
6395 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6396 				    IVAR_TX_MASK_Q_82574(qid));
6397 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6398 				    IVAR_RX_MASK_Q_82574(qid));
6399 			}
6400 			/* Link status */
6401 			ivar |= __SHIFTIN((IVAR_VALID_82574
6402 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6403 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6404 		} else {
6405 			/* Interrupt control */
6406 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6407 			    | GPIE_EIAME | GPIE_PBA);
6408 
6409 			switch (sc->sc_type) {
6410 			case WM_T_82580:
6411 			case WM_T_I350:
6412 			case WM_T_I354:
6413 			case WM_T_I210:
6414 			case WM_T_I211:
6415 				/* TX and RX */
6416 				for (i = 0; i < sc->sc_nqueues; i++) {
6417 					wmq = &sc->sc_queue[i];
6418 					qid = wmq->wmq_id;
6419 					qintr_idx = wmq->wmq_intr_idx;
6420 
6421 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6422 					ivar &= ~IVAR_TX_MASK_Q(qid);
6423 					ivar |= __SHIFTIN((qintr_idx
6424 						| IVAR_VALID),
6425 					    IVAR_TX_MASK_Q(qid));
6426 					ivar &= ~IVAR_RX_MASK_Q(qid);
6427 					ivar |= __SHIFTIN((qintr_idx
6428 						| IVAR_VALID),
6429 					    IVAR_RX_MASK_Q(qid));
6430 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6431 				}
6432 				break;
6433 			case WM_T_82576:
6434 				/* TX and RX */
6435 				for (i = 0; i < sc->sc_nqueues; i++) {
6436 					wmq = &sc->sc_queue[i];
6437 					qid = wmq->wmq_id;
6438 					qintr_idx = wmq->wmq_intr_idx;
6439 
6440 					ivar = CSR_READ(sc,
6441 					    WMREG_IVAR_Q_82576(qid));
6442 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6443 					ivar |= __SHIFTIN((qintr_idx
6444 						| IVAR_VALID),
6445 					    IVAR_TX_MASK_Q_82576(qid));
6446 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6447 					ivar |= __SHIFTIN((qintr_idx
6448 						| IVAR_VALID),
6449 					    IVAR_RX_MASK_Q_82576(qid));
6450 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6451 					    ivar);
6452 				}
6453 				break;
6454 			default:
6455 				break;
6456 			}
6457 
6458 			/* Link status */
6459 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6460 			    IVAR_MISC_OTHER);
6461 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6462 		}
6463 
6464 		if (wm_is_using_multiqueue(sc)) {
6465 			wm_init_rss(sc);
6466 
6467 			/*
6468 			 * NOTE: Receive Full-Packet Checksum Offload
6469 			 * is mutually exclusive with Multiqueue. However,
6470 			 * this is not the same as TCP/IP checksums, which
6471 			 * still work.
6472 			 */
6473 			reg = CSR_READ(sc, WMREG_RXCSUM);
6474 			reg |= RXCSUM_PCSD;
6475 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
6476 		}
6477 	}
6478 
6479 	/* Set up the interrupt registers. */
6480 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6481 
6482 	/* Enable SFP module insertion interrupt if it's required */
6483 	if ((sc->sc_flags & WM_F_SFP) != 0) {
6484 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
6485 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6486 		sfp_mask = ICR_GPI(0);
6487 	}
6488 
6489 	if (wm_is_using_msix(sc)) {
6490 		uint32_t mask;
6491 		struct wm_queue *wmq;
6492 
6493 		switch (sc->sc_type) {
6494 		case WM_T_82574:
6495 			mask = 0;
6496 			for (i = 0; i < sc->sc_nqueues; i++) {
6497 				wmq = &sc->sc_queue[i];
6498 				mask |= ICR_TXQ(wmq->wmq_id);
6499 				mask |= ICR_RXQ(wmq->wmq_id);
6500 			}
6501 			mask |= ICR_OTHER;
6502 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6503 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6504 			break;
6505 		default:
6506 			if (sc->sc_type == WM_T_82575) {
6507 				mask = 0;
6508 				for (i = 0; i < sc->sc_nqueues; i++) {
6509 					wmq = &sc->sc_queue[i];
6510 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
6511 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
6512 				}
6513 				mask |= EITR_OTHER;
6514 			} else {
6515 				mask = 0;
6516 				for (i = 0; i < sc->sc_nqueues; i++) {
6517 					wmq = &sc->sc_queue[i];
6518 					mask |= 1 << wmq->wmq_intr_idx;
6519 				}
6520 				mask |= 1 << sc->sc_link_intr_idx;
6521 			}
6522 			CSR_WRITE(sc, WMREG_EIAC, mask);
6523 			CSR_WRITE(sc, WMREG_EIAM, mask);
6524 			CSR_WRITE(sc, WMREG_EIMS, mask);
6525 
6526 			/* For other interrupts */
6527 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6528 			break;
6529 		}
6530 	} else {
6531 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6532 		    ICR_RXO | ICR_RXT0 | sfp_mask;
6533 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6534 	}
6535 
6536 	/* Set up the inter-packet gap. */
6537 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6538 
6539 	if (sc->sc_type >= WM_T_82543) {
6540 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6541 			struct wm_queue *wmq = &sc->sc_queue[qidx];
6542 			wm_itrs_writereg(sc, wmq);
6543 		}
6544 		/*
6545 		 * Link interrupts occur much less frequently than TX
6546 		 * and RX interrupts. So, we don't
6547 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
6548 		 * FreeBSD's if_igb.
6549 		 */
6550 	}
6551 
6552 	/* Set the VLAN ethernetype. */
6553 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6554 
6555 	/*
6556 	 * Set up the transmit control register; we start out with
6557 	 * a collision distance suitable for FDX, but update it when
6558 	 * we resolve the media type.
6559 	 */
6560 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6561 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
6562 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6563 	if (sc->sc_type >= WM_T_82571)
6564 		sc->sc_tctl |= TCTL_MULR;
6565 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6566 
6567 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6568 		/* Write TDT after TCTL.EN is set. See the document. */
6569 		CSR_WRITE(sc, WMREG_TDT(0), 0);
6570 	}
6571 
6572 	if (sc->sc_type == WM_T_80003) {
6573 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
6574 		reg &= ~TCTL_EXT_GCEX_MASK;
6575 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6576 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6577 	}
6578 
6579 	/* Set the media. */
6580 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6581 		goto out;
6582 
6583 	/* Configure for OS presence */
6584 	wm_init_manageability(sc);
6585 
6586 	/*
6587 	 * Set up the receive control register; we actually program the
6588 	 * register when we set the receive filter. Use multicast address
6589 	 * offset type 0.
6590 	 *
6591 	 * Only the i82544 has the ability to strip the incoming CRC, so we
6592 	 * don't enable that feature.
6593 	 */
6594 	sc->sc_mchash_type = 0;
6595 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6596 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6597 
6598 	/* 82574 use one buffer extended Rx descriptor. */
6599 	if (sc->sc_type == WM_T_82574)
6600 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6601 
6602 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6603 		sc->sc_rctl |= RCTL_SECRC;
6604 
6605 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6606 	    && (ifp->if_mtu > ETHERMTU)) {
6607 		sc->sc_rctl |= RCTL_LPE;
6608 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6609 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6610 	}
6611 
6612 	if (MCLBYTES == 2048)
6613 		sc->sc_rctl |= RCTL_2k;
6614 	else {
6615 		if (sc->sc_type >= WM_T_82543) {
6616 			switch (MCLBYTES) {
6617 			case 4096:
6618 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6619 				break;
6620 			case 8192:
6621 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6622 				break;
6623 			case 16384:
6624 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6625 				break;
6626 			default:
6627 				panic("wm_init: MCLBYTES %d unsupported",
6628 				    MCLBYTES);
6629 				break;
6630 			}
6631 		} else
6632 			panic("wm_init: i82542 requires MCLBYTES = 2048");
6633 	}
6634 
6635 	/* Enable ECC */
6636 	switch (sc->sc_type) {
6637 	case WM_T_82571:
6638 		reg = CSR_READ(sc, WMREG_PBA_ECC);
6639 		reg |= PBA_ECC_CORR_EN;
6640 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6641 		break;
6642 	case WM_T_PCH_LPT:
6643 	case WM_T_PCH_SPT:
6644 	case WM_T_PCH_CNP:
6645 		reg = CSR_READ(sc, WMREG_PBECCSTS);
6646 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6647 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6648 
6649 		sc->sc_ctrl |= CTRL_MEHE;
6650 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6651 		break;
6652 	default:
6653 		break;
6654 	}
6655 
6656 	/*
6657 	 * Set the receive filter.
6658 	 *
6659 	 * For 82575 and 82576, the RX descriptors must be initialized after
6660 	 * the setting of RCTL.EN in wm_set_filter()
6661 	 */
6662 	wm_set_filter(sc);
6663 
6664 	/* On 575 and later set RDT only if RX enabled */
6665 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6666 		int qidx;
6667 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6668 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6669 			for (i = 0; i < WM_NRXDESC; i++) {
6670 				mutex_enter(rxq->rxq_lock);
6671 				wm_init_rxdesc(rxq, i);
6672 				mutex_exit(rxq->rxq_lock);
6673 
6674 			}
6675 		}
6676 	}
6677 
6678 	wm_unset_stopping_flags(sc);
6679 
6680 	/* Start the one second link check clock. */
6681 	callout_schedule(&sc->sc_tick_ch, hz);
6682 
6683 	/* ...all done! */
6684 	ifp->if_flags |= IFF_RUNNING;
6685 
6686  out:
6687 	/* Save last flags for the callback */
6688 	sc->sc_if_flags = ifp->if_flags;
6689 	sc->sc_ec_capenable = ec->ec_capenable;
6690 	if (error)
6691 		log(LOG_ERR, "%s: interface not running\n",
6692 		    device_xname(sc->sc_dev));
6693 	return error;
6694 }
6695 
6696 /*
6697  * wm_stop:		[ifnet interface function]
6698  *
6699  *	Stop transmission on the interface.
6700  */
6701 static void
6702 wm_stop(struct ifnet *ifp, int disable)
6703 {
6704 	struct wm_softc *sc = ifp->if_softc;
6705 
6706 	ASSERT_SLEEPABLE();
6707 
6708 	WM_CORE_LOCK(sc);
6709 	wm_stop_locked(ifp, disable ? true : false, true);
6710 	WM_CORE_UNLOCK(sc);
6711 
6712 	/*
6713 	 * After wm_set_stopping_flags(), it is guaranteed that
6714 	 * wm_handle_queue_work() does not call workqueue_enqueue().
6715 	 * However, workqueue_wait() cannot be called in wm_stop_locked()
6716 	 * because it can sleep...
6717 	 * so, call workqueue_wait() here.
6718 	 */
6719 	for (int i = 0; i < sc->sc_nqueues; i++)
6720 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6721 }
6722 
6723 static void
6724 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6725 {
6726 	struct wm_softc *sc = ifp->if_softc;
6727 	struct wm_txsoft *txs;
6728 	int i, qidx;
6729 
6730 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6731 		device_xname(sc->sc_dev), __func__));
6732 	KASSERT(WM_CORE_LOCKED(sc));
6733 
6734 	wm_set_stopping_flags(sc);
6735 
6736 	if (sc->sc_flags & WM_F_HAS_MII) {
6737 		/* Down the MII. */
6738 		mii_down(&sc->sc_mii);
6739 	} else {
6740 #if 0
6741 		/* Should we clear PHY's status properly? */
6742 		wm_reset(sc);
6743 #endif
6744 	}
6745 
6746 	/* Stop the transmit and receive processes. */
6747 	CSR_WRITE(sc, WMREG_TCTL, 0);
6748 	CSR_WRITE(sc, WMREG_RCTL, 0);
6749 	sc->sc_rctl &= ~RCTL_EN;
6750 
6751 	/*
6752 	 * Clear the interrupt mask to ensure the device cannot assert its
6753 	 * interrupt line.
6754 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6755 	 * service any currently pending or shared interrupt.
6756 	 */
6757 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6758 	sc->sc_icr = 0;
6759 	if (wm_is_using_msix(sc)) {
6760 		if (sc->sc_type != WM_T_82574) {
6761 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6762 			CSR_WRITE(sc, WMREG_EIAC, 0);
6763 		} else
6764 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6765 	}
6766 
6767 	/*
6768 	 * Stop callouts after interrupts are disabled; if we have
6769 	 * to wait for them, we will be releasing the CORE_LOCK
6770 	 * briefly, which will unblock interrupts on the current CPU.
6771 	 */
6772 
6773 	/* Stop the one second clock. */
6774 	if (wait)
6775 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6776 	else
6777 		callout_stop(&sc->sc_tick_ch);
6778 
6779 	/* Stop the 82547 Tx FIFO stall check timer. */
6780 	if (sc->sc_type == WM_T_82547) {
6781 		if (wait)
6782 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6783 		else
6784 			callout_stop(&sc->sc_txfifo_ch);
6785 	}
6786 
6787 	/* Release any queued transmit buffers. */
6788 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6789 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6790 		struct wm_txqueue *txq = &wmq->wmq_txq;
6791 		struct mbuf *m;
6792 
6793 		mutex_enter(txq->txq_lock);
6794 		txq->txq_sending = false; /* Ensure watchdog disabled */
6795 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6796 			txs = &txq->txq_soft[i];
6797 			if (txs->txs_mbuf != NULL) {
6798 				bus_dmamap_unload(sc->sc_dmat,
6799 				    txs->txs_dmamap);
6799 				m_freem(txs->txs_mbuf);
6800 				txs->txs_mbuf = NULL;
6801 			}
6802 		}
6803 		/* Drain txq_interq */
6804 		while ((m = pcq_get(txq->txq_interq)) != NULL)
6805 			m_freem(m);
6806 		mutex_exit(txq->txq_lock);
6807 	}
6808 
6809 	/* Mark the interface as down and cancel the watchdog timer. */
6810 	ifp->if_flags &= ~IFF_RUNNING;
6811 
6812 	if (disable) {
6813 		for (i = 0; i < sc->sc_nqueues; i++) {
6814 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6815 			mutex_enter(rxq->rxq_lock);
6816 			wm_rxdrain(rxq);
6817 			mutex_exit(rxq->rxq_lock);
6818 		}
6819 	}
6820 
6821 #if 0 /* notyet */
6822 	if (sc->sc_type >= WM_T_82544)
6823 		CSR_WRITE(sc, WMREG_WUC, 0);
6824 #endif
6825 }
6826 
6827 static void
6828 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6829 {
6830 	struct mbuf *m;
6831 	int i;
6832 
6833 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6834 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6835 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6836 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6837 		    m->m_data, m->m_len, m->m_flags);
6838 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6839 	    i, i == 1 ? "" : "s");
6840 }
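
/*
 * Example output (illustrative; addresses made up), for a two-mbuf chain
 * on wm0:
 *
 *	wm0: mbuf chain:
 *	wm0:	m_data = 0xffff800012340000, m_len = 54, m_flags = 0x00000003
 *	wm0:	m_data = 0xffff800012350800, m_len = 1460, m_flags = 0x00000001
 *	wm0:	2 mbufs in chain
 */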
6841 
6842 /*
6843  * wm_82547_txfifo_stall:
6844  *
6845  *	Callout used to wait for the 82547 Tx FIFO to drain,
6846  *	reset the FIFO pointers, and restart packet transmission.
6847  */
6848 static void
6849 wm_82547_txfifo_stall(void *arg)
6850 {
6851 	struct wm_softc *sc = arg;
6852 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6853 
6854 	mutex_enter(txq->txq_lock);
6855 
6856 	if (txq->txq_stopping)
6857 		goto out;
6858 
6859 	if (txq->txq_fifo_stall) {
6860 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6861 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6862 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6863 			/*
6864 			 * Packets have drained.  Stop transmitter, reset
6865 			 * FIFO pointers, restart transmitter, and kick
6866 			 * the packet queue.
6867 			 */
6868 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6869 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6870 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6871 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6872 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6873 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6874 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6875 			CSR_WRITE_FLUSH(sc);
6876 
6877 			txq->txq_fifo_head = 0;
6878 			txq->txq_fifo_stall = 0;
6879 			wm_start_locked(&sc->sc_ethercom.ec_if);
6880 		} else {
6881 			/*
6882 			 * Still waiting for packets to drain; try again in
6883 			 * another tick.
6884 			 */
6885 			callout_schedule(&sc->sc_txfifo_ch, 1);
6886 		}
6887 	}
6888 
6889 out:
6890 	mutex_exit(txq->txq_lock);
6891 }
6892 
6893 /*
6894  * wm_82547_txfifo_bugchk:
6895  *
6896  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6897  *	prevent enqueueing a packet that would wrap around the end
6898  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
6899  *
6900  *	We do this by checking the amount of space before the end
6901  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
6902  *	the Tx FIFO, wait for all remaining packets to drain, reset
6903  *	the internal FIFO pointers to the beginning, and restart
6904  *	transmission on the interface.
6905  */
6906 #define	WM_FIFO_HDR		0x10
6907 #define	WM_82547_PAD_LEN	0x3e0
6908 static int
6909 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6910 {
6911 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6912 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6913 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6914 
6915 	/* Just return if already stalled. */
6916 	if (txq->txq_fifo_stall)
6917 		return 1;
6918 
6919 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6920 		/* Stall only occurs in half-duplex mode. */
6921 		goto send_packet;
6922 	}
6923 
6924 	if (len >= WM_82547_PAD_LEN + space) {
6925 		txq->txq_fifo_stall = 1;
6926 		callout_schedule(&sc->sc_txfifo_ch, 1);
6927 		return 1;
6928 	}
6929 
6930  send_packet:
6931 	txq->txq_fifo_head += len;
6932 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6933 		txq->txq_fifo_head -= txq->txq_fifo_size;
6934 
6935 	return 0;
6936 }
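
/*
 * Worked example (editorial): for a 1514-byte frame,
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO
 * space.  The queue stalls only when len >= WM_82547_PAD_LEN + space,
 * i.e. when no more than len - 0x3e0 bytes remain before the end of the
 * FIFO; otherwise txq_fifo_head simply advances by len, wrapping modulo
 * txq_fifo_size.
 */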
6937 
6938 static int
6939 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6940 {
6941 	int error;
6942 
6943 	/*
6944 	 * Allocate the control data structures, and create and load the
6945 	 * DMA map for it.
6946 	 *
6947 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6948 	 * memory.  So must Rx descriptors.  We simplify by allocating
6949 	 * both sets within the same 4G segment.
6950 	 */
6951 	if (sc->sc_type < WM_T_82544)
6952 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6953 	else
6954 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6955 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6956 		txq->txq_descsize = sizeof(nq_txdesc_t);
6957 	else
6958 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6959 
6960 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6961 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6962 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6963 		aprint_error_dev(sc->sc_dev,
6964 		    "unable to allocate TX control data, error = %d\n",
6965 		    error);
6966 		goto fail_0;
6967 	}
6968 
6969 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6970 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6971 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6972 		aprint_error_dev(sc->sc_dev,
6973 		    "unable to map TX control data, error = %d\n", error);
6974 		goto fail_1;
6975 	}
6976 
6977 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6978 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6979 		aprint_error_dev(sc->sc_dev,
6980 		    "unable to create TX control data DMA map, error = %d\n",
6981 		    error);
6982 		goto fail_2;
6983 	}
6984 
6985 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6986 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6987 		aprint_error_dev(sc->sc_dev,
6988 		    "unable to load TX control data DMA map, error = %d\n",
6989 		    error);
6990 		goto fail_3;
6991 	}
6992 
6993 	return 0;
6994 
6995  fail_3:
6996 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6997  fail_2:
6998 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6999 	    WM_TXDESCS_SIZE(txq));
7000  fail_1:
7001 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7002  fail_0:
7003 	return error;
7004 }
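
/*
 * Editorial note: the (bus_size_t)0x100000000ULL boundary argument passed
 * to bus_dmamem_alloc() above is what enforces the "same 4G segment"
 * requirement from the comment: the allocation must not cross a 4GB
 * boundary, so the high 32 bits of every descriptor address in the ring
 * are identical.
 */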
7005 
7006 static void
7007 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7008 {
7009 
7010 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7011 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7012 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7013 	    WM_TXDESCS_SIZE(txq));
7014 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7015 }
7016 
7017 static int
7018 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7019 {
7020 	int error;
7021 	size_t rxq_descs_size;
7022 
7023 	/*
7024 	 * Allocate the control data structures, and create and load the
7025 	 * DMA map for it.
7026 	 *
7027 	 * NOTE: All Tx descriptors must be in the same 4G segment of
7028 	 * memory.  So must Rx descriptors.  We simplify by allocating
7029 	 * both sets within the same 4G segment.
7030 	 */
7031 	rxq->rxq_ndesc = WM_NRXDESC;
7032 	if (sc->sc_type == WM_T_82574)
7033 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
7034 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7035 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
7036 	else
7037 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
7038 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
7039 
7040 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
7041 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
7042 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
7043 		aprint_error_dev(sc->sc_dev,
7044 		    "unable to allocate RX control data, error = %d\n",
7045 		    error);
7046 		goto fail_0;
7047 	}
7048 
7049 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
7050 		    rxq->rxq_desc_rseg, rxq_descs_size,
7051 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
7052 		aprint_error_dev(sc->sc_dev,
7053 		    "unable to map RX control data, error = %d\n", error);
7054 		goto fail_1;
7055 	}
7056 
7057 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
7058 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
7059 		aprint_error_dev(sc->sc_dev,
7060 		    "unable to create RX control data DMA map, error = %d\n",
7061 		    error);
7062 		goto fail_2;
7063 	}
7064 
7065 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
7066 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
7067 		aprint_error_dev(sc->sc_dev,
7068 		    "unable to load RX control data DMA map, error = %d\n",
7069 		    error);
7070 		goto fail_3;
7071 	}
7072 
7073 	return 0;
7074 
7075  fail_3:
7076 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7077  fail_2:
7078 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7079 	    rxq_descs_size);
7080  fail_1:
7081 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7082  fail_0:
7083 	return error;
7084 }
7085 
7086 static void
7087 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
7088 {
7089 
7090 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
7091 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
7092 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
7093 	    rxq->rxq_descsize * rxq->rxq_ndesc);
7094 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
7095 }
7096 
7098 static int
7099 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7100 {
7101 	int i, error;
7102 
7103 	/* Create the transmit buffer DMA maps. */
7104 	WM_TXQUEUELEN(txq) =
7105 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
7106 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
7107 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7108 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
7109 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
7110 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
7111 			aprint_error_dev(sc->sc_dev,
7112 			    "unable to create Tx DMA map %d, error = %d\n",
7113 			    i, error);
7114 			goto fail;
7115 		}
7116 	}
7117 
7118 	return 0;
7119 
7120  fail:
7121 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7122 		if (txq->txq_soft[i].txs_dmamap != NULL)
7123 			bus_dmamap_destroy(sc->sc_dmat,
7124 			    txq->txq_soft[i].txs_dmamap);
7125 	}
7126 	return error;
7127 }
7128 
7129 static void
7130 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7131 {
7132 	int i;
7133 
7134 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7135 		if (txq->txq_soft[i].txs_dmamap != NULL)
7136 			bus_dmamap_destroy(sc->sc_dmat,
7137 			    txq->txq_soft[i].txs_dmamap);
7138 	}
7139 }
7140 
7141 static int
7142 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7143 {
7144 	int i, error;
7145 
7146 	/* Create the receive buffer DMA maps. */
7147 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7148 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7149 			    MCLBYTES, 0, 0,
7150 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7151 			aprint_error_dev(sc->sc_dev,
7152 			    "unable to create Rx DMA map %d error = %d\n",
7153 			    i, error);
7154 			goto fail;
7155 		}
7156 		rxq->rxq_soft[i].rxs_mbuf = NULL;
7157 	}
7158 
7159 	return 0;
7160 
7161  fail:
7162 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7163 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7164 			bus_dmamap_destroy(sc->sc_dmat,
7165 			    rxq->rxq_soft[i].rxs_dmamap);
7166 	}
7167 	return error;
7168 }
7169 
7170 static void
7171 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7172 {
7173 	int i;
7174 
7175 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7176 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7177 			bus_dmamap_destroy(sc->sc_dmat,
7178 			    rxq->rxq_soft[i].rxs_dmamap);
7179 	}
7180 }
7181 
7182 /*
7183  * wm_alloc_txrx_queues:
7184  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
7185  */
7186 static int
7187 wm_alloc_txrx_queues(struct wm_softc *sc)
7188 {
7189 	int i, error, tx_done, rx_done;
7190 
7191 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7192 	    KM_SLEEP);
7193 	if (sc->sc_queue == NULL) {
7194 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
7195 		error = ENOMEM;
7196 		goto fail_0;
7197 	}
7198 
7199 	/* For transmission */
7200 	error = 0;
7201 	tx_done = 0;
7202 	for (i = 0; i < sc->sc_nqueues; i++) {
7203 #ifdef WM_EVENT_COUNTERS
7204 		int j;
7205 		const char *xname;
7206 #endif
7207 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7208 		txq->txq_sc = sc;
7209 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7210 
7211 		error = wm_alloc_tx_descs(sc, txq);
7212 		if (error)
7213 			break;
7214 		error = wm_alloc_tx_buffer(sc, txq);
7215 		if (error) {
7216 			wm_free_tx_descs(sc, txq);
7217 			break;
7218 		}
7219 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7220 		if (txq->txq_interq == NULL) {
7221 			wm_free_tx_descs(sc, txq);
7222 			wm_free_tx_buffer(sc, txq);
7223 			error = ENOMEM;
7224 			break;
7225 		}
7226 
7227 #ifdef WM_EVENT_COUNTERS
7228 		xname = device_xname(sc->sc_dev);
7229 
7230 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7231 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7232 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7233 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7234 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7235 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7236 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7237 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7238 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7239 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7240 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7241 
7242 		for (j = 0; j < WM_NTXSEGS; j++) {
7243 			snprintf(txq->txq_txseg_evcnt_names[j],
7244 			    sizeof(txq->txq_txseg_evcnt_names[j]),
7245 			    "txq%02dtxseg%d", i, j);
7246 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
7247 			    EVCNT_TYPE_MISC, NULL, xname,
7248 			    txq->txq_txseg_evcnt_names[j]);
7247 		}
7248 
7249 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7250 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7251 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7252 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7253 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7254 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7255 #endif /* WM_EVENT_COUNTERS */
7256 
7257 		tx_done++;
7258 	}
7259 	if (error)
7260 		goto fail_1;
7261 
7262 	/* For receive */
7263 	error = 0;
7264 	rx_done = 0;
7265 	for (i = 0; i < sc->sc_nqueues; i++) {
7266 #ifdef WM_EVENT_COUNTERS
7267 		const char *xname;
7268 #endif
7269 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7270 		rxq->rxq_sc = sc;
7271 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7272 
7273 		error = wm_alloc_rx_descs(sc, rxq);
7274 		if (error)
7275 			break;
7276 
7277 		error = wm_alloc_rx_buffer(sc, rxq);
7278 		if (error) {
7279 			wm_free_rx_descs(sc, rxq);
7280 			break;
7281 		}
7282 
7283 #ifdef WM_EVENT_COUNTERS
7284 		xname = device_xname(sc->sc_dev);
7285 
7286 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7287 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7288 
7289 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7290 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7291 #endif /* WM_EVENT_COUNTERS */
7292 
7293 		rx_done++;
7294 	}
7295 	if (error)
7296 		goto fail_2;
7297 
7298 	return 0;
7299 
7300  fail_2:
7301 	for (i = 0; i < rx_done; i++) {
7302 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7303 		wm_free_rx_buffer(sc, rxq);
7304 		wm_free_rx_descs(sc, rxq);
7305 		if (rxq->rxq_lock)
7306 			mutex_obj_free(rxq->rxq_lock);
7307 	}
7308  fail_1:
7309 	for (i = 0; i < tx_done; i++) {
7310 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7311 		pcq_destroy(txq->txq_interq);
7312 		wm_free_tx_buffer(sc, txq);
7313 		wm_free_tx_descs(sc, txq);
7314 		if (txq->txq_lock)
7315 			mutex_obj_free(txq->txq_lock);
7316 	}
7317 
7318 	kmem_free(sc->sc_queue,
7319 	    sizeof(struct wm_queue) * sc->sc_nqueues);
7320  fail_0:
7321 	return error;
7322 }
7323 
7324 /*
7325  * wm_free_txrx_queues:
7326  *	Free {tx,rx} descriptors and {tx,rx} buffers.
7327  */
7328 static void
7329 wm_free_txrx_queues(struct wm_softc *sc)
7330 {
7331 	int i;
7332 
7333 	for (i = 0; i < sc->sc_nqueues; i++) {
7334 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7335 
7336 #ifdef WM_EVENT_COUNTERS
7337 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7338 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7339 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7340 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7341 #endif /* WM_EVENT_COUNTERS */
7342 
7343 		wm_free_rx_buffer(sc, rxq);
7344 		wm_free_rx_descs(sc, rxq);
7345 		if (rxq->rxq_lock)
7346 			mutex_obj_free(rxq->rxq_lock);
7347 	}
7348 
7349 	for (i = 0; i < sc->sc_nqueues; i++) {
7350 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7351 		struct mbuf *m;
7352 #ifdef WM_EVENT_COUNTERS
7353 		int j;
7354 
7355 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7356 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7357 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7358 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7359 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7360 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7361 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7362 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7363 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7364 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7365 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7366 
7367 		for (j = 0; j < WM_NTXSEGS; j++)
7368 			evcnt_detach(&txq->txq_ev_txseg[j]);
7369 
7370 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7371 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7372 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7373 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7374 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7375 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7376 #endif /* WM_EVENT_COUNTERS */
7377 
7378 		/* Drain txq_interq */
7379 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7380 			m_freem(m);
7381 		pcq_destroy(txq->txq_interq);
7382 
7383 		wm_free_tx_buffer(sc, txq);
7384 		wm_free_tx_descs(sc, txq);
7385 		if (txq->txq_lock)
7386 			mutex_obj_free(txq->txq_lock);
7387 	}
7388 
7389 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7390 }
7391 
7392 static void
7393 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7394 {
7395 
7396 	KASSERT(mutex_owned(txq->txq_lock));
7397 
7398 	/* Initialize the transmit descriptor ring. */
7399 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7400 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7401 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7402 	txq->txq_free = WM_NTXDESC(txq);
7403 	txq->txq_next = 0;
7404 }
7405 
7406 static void
7407 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7408     struct wm_txqueue *txq)
7409 {
7410 
7411 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7412 		device_xname(sc->sc_dev), __func__));
7413 	KASSERT(mutex_owned(txq->txq_lock));
7414 
7415 	if (sc->sc_type < WM_T_82543) {
7416 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7417 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7418 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7419 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7420 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7421 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7422 	} else {
7423 		int qid = wmq->wmq_id;
7424 
7425 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7426 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7427 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7428 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
7429 
7430 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7431 			/*
7432 			 * Don't write TDT before TCTL.EN is set.
7433 			 * See the hardware documentation.
7434 			 */
7435 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7436 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7437 			    | TXDCTL_WTHRESH(0));
7438 		else {
7439 			/* XXX should update with AIM? */
7440 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7441 			if (sc->sc_type >= WM_T_82540) {
7442 				/* Should be the same */
7443 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7444 			}
7445 
7446 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
7447 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7448 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7449 		}
7450 	}
7451 }
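
/*
 * Illustrative example (editorial): the 64-bit descriptor ring base
 * address is programmed as two 32-bit halves.  For a ring at bus
 * address 0x123456000, the writes above amount to:
 *
 *	TDBAH = 0x00000001	(WM_CDTXADDR_HI)
 *	TDBAL = 0x23456000	(WM_CDTXADDR_LO)
 */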
7452 
7453 static void
7454 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7455 {
7456 	int i;
7457 
7458 	KASSERT(mutex_owned(txq->txq_lock));
7459 
7460 	/* Initialize the transmit job descriptors. */
7461 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7462 		txq->txq_soft[i].txs_mbuf = NULL;
7463 	txq->txq_sfree = WM_TXQUEUELEN(txq);
7464 	txq->txq_snext = 0;
7465 	txq->txq_sdirty = 0;
7466 }
7467 
7468 static void
7469 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7470     struct wm_txqueue *txq)
7471 {
7472 
7473 	KASSERT(mutex_owned(txq->txq_lock));
7474 
7475 	/*
7476 	 * Set up some register offsets that are different between
7477 	 * the i82542 and the i82543 and later chips.
7478 	 */
7479 	if (sc->sc_type < WM_T_82543)
7480 		txq->txq_tdt_reg = WMREG_OLD_TDT;
7481 	else
7482 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7483 
7484 	wm_init_tx_descs(sc, txq);
7485 	wm_init_tx_regs(sc, wmq, txq);
7486 	wm_init_tx_buffer(sc, txq);
7487 
7488 	/* Clear all flags except WM_TXQ_LINKDOWN_DISCARD. */
7489 	txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD;
7490 
7491 	txq->txq_sending = false;
7492 }
7493 
7494 static void
7495 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7496     struct wm_rxqueue *rxq)
7497 {
7498 
7499 	KASSERT(mutex_owned(rxq->rxq_lock));
7500 
7501 	/*
7502 	 * Initialize the receive descriptor and receive job
7503 	 * descriptor rings.
7504 	 */
7505 	if (sc->sc_type < WM_T_82543) {
7506 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7507 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7508 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7509 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7510 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7511 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7512 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7513 
7514 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7515 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7516 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7517 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7518 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7519 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7520 	} else {
7521 		int qid = wmq->wmq_id;
7522 
7523 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7524 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7525 		CSR_WRITE(sc, WMREG_RDLEN(qid),
7526 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7527 
7528 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7529 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7530 				panic("%s: MCLBYTES %d unsupported for "
7531 				    "82575 or higher\n", __func__, MCLBYTES);
7531 
7532 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
7533 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
7534 			    SRRCTL_DESCTYPE_ADV_ONEBUF
7535 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7535 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7536 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7537 			    | RXDCTL_WTHRESH(1));
7538 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7539 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7540 		} else {
7541 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7542 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7543 			/* XXX should update with AIM? */
7544 			CSR_WRITE(sc, WMREG_RDTR,
7545 			    (wmq->wmq_itr / 4) | RDTR_FPD);
7546 			/* MUST be the same */
7547 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7548 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7549 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7550 		}
7551 	}
7552 }
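
/*
 * Editorial note: the SRRCTL.BSIZEPKT field programmed above is in units
 * of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes; assuming the usual MCLBYTES of
 * 2048 and a shift of 10, MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT programs a
 * 2KB receive buffer (field value 2).  The panic above catches MCLBYTES
 * values that are not a multiple of that unit.
 */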
7553 
7554 static int
7555 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7556 {
7557 	struct wm_rxsoft *rxs;
7558 	int error, i;
7559 
7560 	KASSERT(mutex_owned(rxq->rxq_lock));
7561 
7562 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7563 		rxs = &rxq->rxq_soft[i];
7564 		if (rxs->rxs_mbuf == NULL) {
7565 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7566 				log(LOG_ERR, "%s: unable to allocate or map "
7567 				    "rx buffer %d, error = %d\n",
7568 				    device_xname(sc->sc_dev), i, error);
7569 				/*
7570 				 * XXX Should attempt to run with fewer receive
7571 				 * XXX buffers instead of just failing.
7572 				 */
7573 				wm_rxdrain(rxq);
7574 				return ENOMEM;
7575 			}
7576 		} else {
7577 			/*
7578 			 * For 82575 and 82576, the RX descriptors must be
7579 			 * initialized after the setting of RCTL.EN in
7580 			 * wm_set_filter()
7581 			 */
7582 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7583 				wm_init_rxdesc(rxq, i);
7584 		}
7585 	}
7586 	rxq->rxq_ptr = 0;
7587 	rxq->rxq_discard = 0;
7588 	WM_RXCHAIN_RESET(rxq);
7589 
7590 	return 0;
7591 }
7592 
7593 static int
7594 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7595     struct wm_rxqueue *rxq)
7596 {
7597 
7598 	KASSERT(mutex_owned(rxq->rxq_lock));
7599 
7600 	/*
7601 	 * Set up some register offsets that are different between
7602 	 * the i82542 and the i82543 and later chips.
7603 	 */
7604 	if (sc->sc_type < WM_T_82543)
7605 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7606 	else
7607 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7608 
7609 	wm_init_rx_regs(sc, wmq, rxq);
7610 	return wm_init_rx_buffer(sc, rxq);
7611 }
7612 
7613 /*
7614  * wm_init_txrx_queues:
7615  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
7616  */
7617 static int
7618 wm_init_txrx_queues(struct wm_softc *sc)
7619 {
7620 	int i, error = 0;
7621 
7622 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7623 		device_xname(sc->sc_dev), __func__));
7624 
7625 	for (i = 0; i < sc->sc_nqueues; i++) {
7626 		struct wm_queue *wmq = &sc->sc_queue[i];
7627 		struct wm_txqueue *txq = &wmq->wmq_txq;
7628 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7629 
7630 		/*
7631 		 * TODO
7632 		 * Currently, use a constant value instead of AIM.
7633 		 * Furthermore, the interrupt interval of the multiqueue
7634 		 * (polling mode) path is lower than the default value.
7635 		 * More tuning and AIM are required.
7636 		 */
7637 		if (wm_is_using_multiqueue(sc))
7638 			wmq->wmq_itr = 50;
7639 		else
7640 			wmq->wmq_itr = sc->sc_itr_init;
7641 		wmq->wmq_set_itr = true;
7642 
7643 		mutex_enter(txq->txq_lock);
7644 		wm_init_tx_queue(sc, wmq, txq);
7645 		mutex_exit(txq->txq_lock);
7646 
7647 		mutex_enter(rxq->rxq_lock);
7648 		error = wm_init_rx_queue(sc, wmq, rxq);
7649 		mutex_exit(rxq->rxq_lock);
7650 		if (error)
7651 			break;
7652 	}
7653 
7654 	return error;
7655 }
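
/*
 * Editorial note: for legacy chips, wmq_itr is consumed above as
 * wmq_itr / 4 in the TIDV/TADV and RDTR/RADV delay registers; on
 * multiqueue hardware it is presumably written to the per-vector
 * interrupt-throttling register when wmq_set_itr is true (handled
 * elsewhere in this file).
 */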
7656 
7657 /*
7658  * wm_tx_offload:
7659  *
7660  *	Set up TCP/IP checksumming parameters for the
7661  *	specified packet.
7662  */
7663 static void
7664 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7665     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7666 {
7667 	struct mbuf *m0 = txs->txs_mbuf;
7668 	struct livengood_tcpip_ctxdesc *t;
7669 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
7670 	uint32_t ipcse;
7671 	struct ether_header *eh;
7672 	int offset, iphl;
7673 	uint8_t fields;
7674 
7675 	/*
7676 	 * XXX It would be nice if the mbuf pkthdr had offset
7677 	 * fields for the protocol headers.
7678 	 */
7679 
7680 	eh = mtod(m0, struct ether_header *);
7681 	switch (htons(eh->ether_type)) {
7682 	case ETHERTYPE_IP:
7683 	case ETHERTYPE_IPV6:
7684 		offset = ETHER_HDR_LEN;
7685 		break;
7686 
7687 	case ETHERTYPE_VLAN:
7688 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7689 		break;
7690 
7691 	default:
7692 		/* Don't support this protocol or encapsulation. */
7693 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7694 		txq->txq_last_hw_ipcs = 0;
7695 		txq->txq_last_hw_tucs = 0;
7696 		*fieldsp = 0;
7697 		*cmdp = 0;
7698 		return;
7699 	}
7700 
7701 	if ((m0->m_pkthdr.csum_flags &
7702 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7703 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7704 	} else {
7705 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7706 	}
7706 
7707 	ipcse = offset + iphl - 1;
7708 
7709 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7710 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7711 	seg = 0;
7712 	fields = 0;
7713 
7714 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7715 		int hlen = offset + iphl;
7716 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7717 
7718 		if (__predict_false(m0->m_len <
7719 				    (hlen + sizeof(struct tcphdr)))) {
7720 			/*
7721 			 * TCP/IP headers are not in the first mbuf; we need
7722 			 * to do this the slow and painful way. Let's just
7723 			 * hope this doesn't happen very often.
7724 			 */
7725 			struct tcphdr th;
7726 
7727 			WM_Q_EVCNT_INCR(txq, tsopain);
7728 
7729 			m_copydata(m0, hlen, sizeof(th), &th);
7730 			if (v4) {
7731 				struct ip ip;
7732 
7733 				m_copydata(m0, offset, sizeof(ip), &ip);
7734 				ip.ip_len = 0;
7735 				m_copyback(m0,
7736 				    offset + offsetof(struct ip, ip_len),
7737 				    sizeof(ip.ip_len), &ip.ip_len);
7738 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7739 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7740 			} else {
7741 				struct ip6_hdr ip6;
7742 
7743 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7744 				ip6.ip6_plen = 0;
7745 				m_copyback(m0,
7746 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7747 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7748 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7749 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7750 			}
7751 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7752 			    sizeof(th.th_sum), &th.th_sum);
7753 
7754 			hlen += th.th_off << 2;
7755 		} else {
7756 			/*
7757 			 * TCP/IP headers are in the first mbuf; we can do
7758 			 * this the easy way.
7759 			 */
7760 			struct tcphdr *th;
7761 
7762 			if (v4) {
7763 				struct ip *ip =
7764 				    (void *)(mtod(m0, char *) + offset);
7765 				th = (void *)(mtod(m0, char *) + hlen);
7766 
7767 				ip->ip_len = 0;
7768 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7769 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7770 			} else {
7771 				struct ip6_hdr *ip6 =
7772 				    (void *)(mtod(m0, char *) + offset);
7773 				th = (void *)(mtod(m0, char *) + hlen);
7774 
7775 				ip6->ip6_plen = 0;
7776 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7777 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7778 			}
7779 			hlen += th->th_off << 2;
7780 		}
7781 
7782 		if (v4) {
7783 			WM_Q_EVCNT_INCR(txq, tso);
7784 			cmdlen |= WTX_TCPIP_CMD_IP;
7785 		} else {
7786 			WM_Q_EVCNT_INCR(txq, tso6);
7787 			ipcse = 0;
7788 		}
7789 		cmd |= WTX_TCPIP_CMD_TSE;
7790 		cmdlen |= WTX_TCPIP_CMD_TSE |
7791 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7792 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7793 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7794 	}
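
	/*
	 * Worked example (editorial): for a TSO frame with a standard
	 * Ethernet + IPv4 + TCP header and no options,
	 * hlen = 14 + 20 + 20 = 54, so seg above becomes
	 * WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz).
	 */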
7795 
7796 	/*
7797 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7798 	 * offload feature, if we load the context descriptor, we
7799 	 * MUST provide valid values for IPCSS and TUCSS fields.
7800 	 */
7801 
7802 	ipcs = WTX_TCPIP_IPCSS(offset) |
7803 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7804 	    WTX_TCPIP_IPCSE(ipcse);
7805 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7806 		WM_Q_EVCNT_INCR(txq, ipsum);
7807 		fields |= WTX_IXSM;
7808 	}
7809 
7810 	offset += iphl;
7811 
7812 	if (m0->m_pkthdr.csum_flags &
7813 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7814 		WM_Q_EVCNT_INCR(txq, tusum);
7815 		fields |= WTX_TXSM;
7816 		tucs = WTX_TCPIP_TUCSS(offset) |
7817 		    WTX_TCPIP_TUCSO(offset +
7818 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7819 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7820 	} else if ((m0->m_pkthdr.csum_flags &
7821 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7822 		WM_Q_EVCNT_INCR(txq, tusum6);
7823 		fields |= WTX_TXSM;
7824 		tucs = WTX_TCPIP_TUCSS(offset) |
7825 		    WTX_TCPIP_TUCSO(offset +
7826 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7827 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7828 	} else {
7829 		/* Just initialize it to a valid TCP context. */
7830 		tucs = WTX_TCPIP_TUCSS(offset) |
7831 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7832 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7833 	}
7834 
7835 	*cmdp = cmd;
7836 	*fieldsp = fields;
7837 
7838 	/*
7839 	 * We don't have to write context descriptor for every packet
7840 	 * except for 82574. For 82574, we must write context descriptor
7841 	 * for every packet when we use two descriptor queues.
7842 	 *
7843 	 * The 82574L can only remember the *last* context used
7844 	 * regardless of the queue it was used for.  We cannot reuse
7845 	 * contexts on this hardware platform and must generate a new
7846 	 * context every time.  82574L hardware spec, section 7.2.6,
7847 	 * second note.
7848 	 */
7849 	if (sc->sc_nqueues < 2) {
7850 		/*
7851 		 * Setting up a new checksum offload context for every
7852 		 * frame takes a lot of processing time for the hardware.
7853 		 * This also reduces performance a lot for small sized
7854 		 * frames, so avoid it if the driver can reuse a previously
7855 		 * configured checksum offload context.
7856 		 * For TSO, in theory we could reuse the same TSO context
7857 		 * only if the frame is the same type (IP/TCP) and has the
7858 		 * same MSS. However, checking whether a frame has the same
7859 		 * IP/TCP structure is hard, so just ignore that and always
7860 		 * establish a new TSO context.
7861 		 */
7862 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7863 		    == 0) {
7864 			if (txq->txq_last_hw_cmd == cmd &&
7865 			    txq->txq_last_hw_fields == fields &&
7866 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7867 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7868 				WM_Q_EVCNT_INCR(txq, skipcontext);
7869 				return;
7870 			}
7871 		}
7872 
7873 		txq->txq_last_hw_cmd = cmd;
7874 		txq->txq_last_hw_fields = fields;
7875 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7876 		txq->txq_last_hw_tucs = (tucs & 0xffff);
7877 	}
7878 
7879 	/* Fill in the context descriptor. */
7880 	t = (struct livengood_tcpip_ctxdesc *)
7881 	    &txq->txq_descs[txq->txq_next];
7882 	t->tcpip_ipcs = htole32(ipcs);
7883 	t->tcpip_tucs = htole32(tucs);
7884 	t->tcpip_cmdlen = htole32(cmdlen);
7885 	t->tcpip_seg = htole32(seg);
7886 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7887 
7888 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7889 	txs->txs_ndesc++;
7890 }
7891 
7892 static inline int
7893 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7894 {
7895 	struct wm_softc *sc = ifp->if_softc;
7896 	u_int cpuid = cpu_index(curcpu());
7897 
7898 	/*
7899 	 * Currently, a simple distribution strategy based on the
7900 	 * sending CPU's index.
7901 	 * TODO: distribute by flowid (RSS hash value).
7902 	 */
7903 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu)
7904 	    % sc->sc_nqueues;
7904 }
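
/*
 * Worked example (editorial, illustrative numbers): with ncpu = 8,
 * sc_nqueues = 4 and sc_affinity_offset = 2, a packet sent from CPU
 * index 5 selects ((5 + 8 - 2) % 8) % 4 = 3, i.e. Tx queue 3.
 */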
7905 
7906 static inline bool
7907 wm_linkdown_discard(struct wm_txqueue *txq)
7908 {
7909 
7910 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
7911 		return true;
7912 
7913 	return false;
7914 }
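
/*
 * Editorial cross-reference: WM_TXQ_LINKDOWN_DISCARD is preserved across
 * wm_init_tx_queue() (see above) and, when set, makes
 * wm_(nq_)send_common_locked() below drop outgoing packets while the
 * link is down instead of queueing them.
 */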
7915 
7916 /*
7917  * wm_start:		[ifnet interface function]
7918  *
7919  *	Start packet transmission on the interface.
7920  */
7921 static void
7922 wm_start(struct ifnet *ifp)
7923 {
7924 	struct wm_softc *sc = ifp->if_softc;
7925 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7926 
7927 #ifdef WM_MPSAFE
7928 	KASSERT(if_is_mpsafe(ifp));
7929 #endif
7930 	/*
7931 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7932 	 */
7933 
7934 	mutex_enter(txq->txq_lock);
7935 	if (!txq->txq_stopping)
7936 		wm_start_locked(ifp);
7937 	mutex_exit(txq->txq_lock);
7938 }
7939 
7940 static void
7941 wm_start_locked(struct ifnet *ifp)
7942 {
7943 	struct wm_softc *sc = ifp->if_softc;
7944 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7945 
7946 	wm_send_common_locked(ifp, txq, false);
7947 }
7948 
7949 static int
7950 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7951 {
7952 	int qid;
7953 	struct wm_softc *sc = ifp->if_softc;
7954 	struct wm_txqueue *txq;
7955 
7956 	qid = wm_select_txqueue(ifp, m);
7957 	txq = &sc->sc_queue[qid].wmq_txq;
7958 
7959 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7960 		m_freem(m);
7961 		WM_Q_EVCNT_INCR(txq, pcqdrop);
7962 		return ENOBUFS;
7963 	}
7964 
7965 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7966 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7967 	if (m->m_flags & M_MCAST)
7968 		if_statinc_ref(nsr, if_omcasts);
7969 	IF_STAT_PUTREF(ifp);
7970 
7971 	if (mutex_tryenter(txq->txq_lock)) {
7972 		if (!txq->txq_stopping)
7973 			wm_transmit_locked(ifp, txq);
7974 		mutex_exit(txq->txq_lock);
7975 	}
7976 
7977 	return 0;
7978 }
7979 
7980 static void
7981 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7982 {
7983 
7984 	wm_send_common_locked(ifp, txq, true);
7985 }
7986 
7987 static void
7988 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7989     bool is_transmit)
7990 {
7991 	struct wm_softc *sc = ifp->if_softc;
7992 	struct mbuf *m0;
7993 	struct wm_txsoft *txs;
7994 	bus_dmamap_t dmamap;
7995 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7996 	bus_addr_t curaddr;
7997 	bus_size_t seglen, curlen;
7998 	uint32_t cksumcmd;
7999 	uint8_t cksumfields;
8000 	bool remap = true;
8001 
8002 	KASSERT(mutex_owned(txq->txq_lock));
8003 
8004 	if ((ifp->if_flags & IFF_RUNNING) == 0)
8005 		return;
8006 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8007 		return;
8008 
8009 	if (__predict_false(wm_linkdown_discard(txq))) {
8010 		do {
8011 			if (is_transmit)
8012 				m0 = pcq_get(txq->txq_interq);
8013 			else
8014 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8015 			/*
8016 			 * Increment the sent-packet counter, as in the case
8017 			 * where a packet is discarded by a link-down PHY.
8018 			 */
8019 			if (m0 != NULL)
8020 				if_statinc(ifp, if_opackets);
8021 			m_freem(m0);
8022 		} while (m0 != NULL);
8023 		return;
8024 	}
8025 
8026 	/* Remember the previous number of free descriptors. */
8027 	ofree = txq->txq_free;
8028 
8029 	/*
8030 	 * Loop through the send queue, setting up transmit descriptors
8031 	 * until we drain the queue, or use up all available transmit
8032 	 * descriptors.
8033 	 */
8034 	for (;;) {
8035 		m0 = NULL;
8036 
8037 		/* Get a work queue entry. */
8038 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8039 			wm_txeof(txq, UINT_MAX);
8040 			if (txq->txq_sfree == 0) {
8041 				DPRINTF(sc, WM_DEBUG_TX,
8042 				    ("%s: TX: no free job descriptors\n",
8043 					device_xname(sc->sc_dev)));
8044 				WM_Q_EVCNT_INCR(txq, txsstall);
8045 				break;
8046 			}
8047 		}
8048 
8049 		/* Grab a packet off the queue. */
8050 		if (is_transmit)
8051 			m0 = pcq_get(txq->txq_interq);
8052 		else
8053 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8054 		if (m0 == NULL)
8055 			break;
8056 
8057 		DPRINTF(sc, WM_DEBUG_TX,
8058 		    ("%s: TX: have packet to transmit: %p\n",
8059 			device_xname(sc->sc_dev), m0));
8060 
8061 		txs = &txq->txq_soft[txq->txq_snext];
8062 		dmamap = txs->txs_dmamap;
8063 
8064 		use_tso = (m0->m_pkthdr.csum_flags &
8065 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
8066 
8067 		/*
8068 		 * So says the Linux driver:
8069 		 * The controller does a simple calculation to make sure
8070 		 * there is enough room in the FIFO before initiating the
8071 		 * DMA for each buffer. The calc is:
8072 		 *	4 = ceil(buffer len / MSS)
8073 		 * To make sure we don't overrun the FIFO, adjust the max
8074 		 * buffer len if the MSS drops.
8075 		 */
8076 		dmamap->dm_maxsegsz =
8077 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
8078 		    ? m0->m_pkthdr.segsz << 2
8079 		    : WTX_MAX_LEN;
8080 
8081 		/*
8082 		 * Load the DMA map.  If this fails, the packet either
8083 		 * didn't fit in the allotted number of segments, or we
8084 		 * were short on resources.  For the too-many-segments
8085 		 * case, we simply report an error and drop the packet,
8086 		 * since we can't sanely copy a jumbo packet to a single
8087 		 * buffer.
8088 		 */
8089 retry:
8090 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8091 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8092 		if (__predict_false(error)) {
8093 			if (error == EFBIG) {
8094 				if (remap == true) {
8095 					struct mbuf *m;
8096 
8097 					remap = false;
8098 					m = m_defrag(m0, M_NOWAIT);
8099 					if (m != NULL) {
8100 						WM_Q_EVCNT_INCR(txq, defrag);
8101 						m0 = m;
8102 						goto retry;
8103 					}
8104 				}
8105 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8106 				log(LOG_ERR, "%s: Tx packet consumes too many "
8107 				    "DMA segments, dropping...\n",
8108 				    device_xname(sc->sc_dev));
8109 				wm_dump_mbuf_chain(sc, m0);
8110 				m_freem(m0);
8111 				continue;
8112 			}
8113 			/* Short on resources, just stop for now. */
8114 			DPRINTF(sc, WM_DEBUG_TX,
8115 			    ("%s: TX: dmamap load failed: %d\n",
8116 				device_xname(sc->sc_dev), error));
8117 			break;
8118 		}
8119 
8120 		segs_needed = dmamap->dm_nsegs;
8121 		if (use_tso) {
8122 			/* For sentinel descriptor; see below. */
8123 			segs_needed++;
8124 		}
8125 
8126 		/*
8127 		 * Ensure we have enough descriptors free to describe
8128 		 * the packet. Note, we always reserve one descriptor
8129 		 * at the end of the ring due to the semantics of the
8130 		 * TDT register, plus one more in the event we need
8131 		 * to load offload context.
8132 		 */
8133 		if (segs_needed > txq->txq_free - 2) {
8134 			/*
8135 			 * Not enough free descriptors to transmit this
8136 			 * packet.  We haven't committed anything yet,
8137 			 * so just unload the DMA map, put the packet
8138 			 * pack on the queue, and punt. Notify the upper
8139 			 * back on the queue, and punt.  Notify the upper
8140 			 */
8141 			DPRINTF(sc, WM_DEBUG_TX,
8142 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8143 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8144 				segs_needed, txq->txq_free - 1));
8145 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8146 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8147 			WM_Q_EVCNT_INCR(txq, txdstall);
8148 			break;
8149 		}
8150 
8151 		/*
8152 		 * Check for 82547 Tx FIFO bug. We need to do this
8153 		 * once we know we can transmit the packet, since we
8154 		 * do some internal FIFO space accounting here.
8155 		 */
8156 		if (sc->sc_type == WM_T_82547 &&
8157 		    wm_82547_txfifo_bugchk(sc, m0)) {
8158 			DPRINTF(sc, WM_DEBUG_TX,
8159 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
8160 				device_xname(sc->sc_dev)));
8161 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8162 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8163 			WM_Q_EVCNT_INCR(txq, fifo_stall);
8164 			break;
8165 		}
8166 
8167 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8168 
8169 		DPRINTF(sc, WM_DEBUG_TX,
8170 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8171 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8172 
8173 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8174 
8175 		/*
8176 		 * Store a pointer to the packet so that we can free it
8177 		 * later.
8178 		 *
8179 		 * Initially, we consider the number of descriptors the
8180 		 * packet uses the number of DMA segments.  This may be
8181 		 * incremented by 1 if we do checksum offload (a descriptor
8182 		 * is used to set the checksum context).
8183 		 */
8184 		txs->txs_mbuf = m0;
8185 		txs->txs_firstdesc = txq->txq_next;
8186 		txs->txs_ndesc = segs_needed;
8187 
8188 		/* Set up offload parameters for this packet. */
8189 		if (m0->m_pkthdr.csum_flags &
8190 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8191 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8192 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8193 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8194 		} else {
8195 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8196 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8197 			cksumcmd = 0;
8198 			cksumfields = 0;
8199 		}
8200 
8201 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8202 
8203 		/* Sync the DMA map. */
8204 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8205 		    BUS_DMASYNC_PREWRITE);
8206 
8207 		/* Initialize the transmit descriptor. */
8208 		for (nexttx = txq->txq_next, seg = 0;
8209 		     seg < dmamap->dm_nsegs; seg++) {
8210 			for (seglen = dmamap->dm_segs[seg].ds_len,
8211 			     curaddr = dmamap->dm_segs[seg].ds_addr;
8212 			     seglen != 0;
8213 			     curaddr += curlen, seglen -= curlen,
8214 			     nexttx = WM_NEXTTX(txq, nexttx)) {
8215 				curlen = seglen;
8216 
8217 				/*
8218 				 * So says the Linux driver:
8219 				 * Work around for premature descriptor
8220 				 * write-backs in TSO mode.  Append a
8221 				 * 4-byte sentinel descriptor.
8222 				 */
8223 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8224 				    curlen > 8)
8225 					curlen -= 4;
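
				/*
				 * Editorial example: a final 1448-byte
				 * TSO segment is thus described as 1444
				 * bytes here, and the remaining 4 bytes
				 * become a separate sentinel descriptor
				 * on the next pass of this loop.
				 */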
8226 
8227 				wm_set_dma_addr(
8228 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
8229 				txq->txq_descs[nexttx].wtx_cmdlen
8230 				    = htole32(cksumcmd | curlen);
8231 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
8232 				    = 0;
8233 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
8234 				    = cksumfields;
8235 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8236 				lasttx = nexttx;
8237 
8238 				DPRINTF(sc, WM_DEBUG_TX,
8239 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
8240 					"len %#04zx\n",
8241 					device_xname(sc->sc_dev), nexttx,
8242 					(uint64_t)curaddr, curlen));
8243 			}
8244 		}
8245 
8246 		KASSERT(lasttx != -1);
8247 
8248 		/*
8249 		 * Set up the command byte on the last descriptor of
8250 		 * the packet. If we're in the interrupt delay window,
8251 		 * delay the interrupt.
8252 		 */
8253 		txq->txq_descs[lasttx].wtx_cmdlen |=
8254 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8255 
8256 		/*
8257 		 * If VLANs are enabled and the packet has a VLAN tag, set
8258 		 * up the descriptor to encapsulate the packet for us.
8259 		 *
8260 		 * This is only valid on the last descriptor of the packet.
8261 		 */
8262 		if (vlan_has_tag(m0)) {
8263 			txq->txq_descs[lasttx].wtx_cmdlen |=
8264 			    htole32(WTX_CMD_VLE);
8265 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8266 			    = htole16(vlan_get_tag(m0));
8267 		}
8268 
8269 		txs->txs_lastdesc = lasttx;
8270 
8271 		DPRINTF(sc, WM_DEBUG_TX,
8272 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
8273 			device_xname(sc->sc_dev),
8274 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8275 
8276 		/* Sync the descriptors we're using. */
8277 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8278 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8279 
8280 		/* Give the packet to the chip. */
8281 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8282 
8283 		DPRINTF(sc, WM_DEBUG_TX,
8284 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8285 
8286 		DPRINTF(sc, WM_DEBUG_TX,
8287 		    ("%s: TX: finished transmitting packet, job %d\n",
8288 			device_xname(sc->sc_dev), txq->txq_snext));
8289 
8290 		/* Advance the tx pointer. */
8291 		txq->txq_free -= txs->txs_ndesc;
8292 		txq->txq_next = nexttx;
8293 
8294 		txq->txq_sfree--;
8295 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8296 
8297 		/* Pass the packet to any BPF listeners. */
8298 		bpf_mtap(ifp, m0, BPF_D_OUT);
8299 	}
8300 
8301 	if (m0 != NULL) {
8302 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8303 		WM_Q_EVCNT_INCR(txq, descdrop);
8304 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8305 			__func__));
8306 		m_freem(m0);
8307 	}
8308 
8309 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8310 		/* No more slots; notify upper layer. */
8311 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8312 	}
8313 
8314 	if (txq->txq_free != ofree) {
8315 		/* Set a watchdog timer in case the chip flakes out. */
8316 		txq->txq_lastsent = time_uptime;
8317 		txq->txq_sending = true;
8318 	}
8319 }
8320 
8321 /*
8322  * wm_nq_tx_offload:
8323  *
8324  *	Set up TCP/IP checksumming parameters for the
8325  *	specified packet, for NEWQUEUE devices
8326  */
8327 static void
8328 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8329     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8330 {
8331 	struct mbuf *m0 = txs->txs_mbuf;
8332 	uint32_t vl_len, mssidx, cmdc;
8333 	struct ether_header *eh;
8334 	int offset, iphl;
8335 
8336 	/*
8337 	 * XXX It would be nice if the mbuf pkthdr had offset
8338 	 * fields for the protocol headers.
8339 	 */
8340 	*cmdlenp = 0;
8341 	*fieldsp = 0;
8342 
8343 	eh = mtod(m0, struct ether_header *);
8344 	switch (htons(eh->ether_type)) {
8345 	case ETHERTYPE_IP:
8346 	case ETHERTYPE_IPV6:
8347 		offset = ETHER_HDR_LEN;
8348 		break;
8349 
8350 	case ETHERTYPE_VLAN:
8351 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8352 		break;
8353 
8354 	default:
8355 		/* Don't support this protocol or encapsulation. */
8356 		*do_csum = false;
8357 		return;
8358 	}
8359 	*do_csum = true;
8360 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8361 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8362 
8363 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8364 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8365 
8366 	if ((m0->m_pkthdr.csum_flags &
8367 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8368 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8369 	} else {
8370 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8371 	}
8372 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8373 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8374 
8375 	if (vlan_has_tag(m0)) {
8376 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8377 		    << NQTXC_VLLEN_VLAN_SHIFT);
8378 		*cmdlenp |= NQTX_CMD_VLE;
8379 	}
8380 
8381 	mssidx = 0;
8382 
8383 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8384 		int hlen = offset + iphl;
8385 		int tcp_hlen;
8386 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8387 
8388 		if (__predict_false(m0->m_len <
8389 				    (hlen + sizeof(struct tcphdr)))) {
8390 			/*
8391 			 * TCP/IP headers are not in the first mbuf; we need
8392 			 * to do this the slow and painful way. Let's just
8393 			 * hope this doesn't happen very often.
8394 			 */
8395 			struct tcphdr th;
8396 
8397 			WM_Q_EVCNT_INCR(txq, tsopain);
8398 
8399 			m_copydata(m0, hlen, sizeof(th), &th);
8400 			if (v4) {
8401 				struct ip ip;
8402 
8403 				m_copydata(m0, offset, sizeof(ip), &ip);
8404 				ip.ip_len = 0;
8405 				m_copyback(m0,
8406 				    offset + offsetof(struct ip, ip_len),
8407 				    sizeof(ip.ip_len), &ip.ip_len);
8408 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8409 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8410 			} else {
8411 				struct ip6_hdr ip6;
8412 
8413 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8414 				ip6.ip6_plen = 0;
8415 				m_copyback(m0,
8416 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8417 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8418 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8419 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8420 			}
8421 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8422 			    sizeof(th.th_sum), &th.th_sum);
8423 
8424 			tcp_hlen = th.th_off << 2;
8425 		} else {
8426 			/*
8427 			 * TCP/IP headers are in the first mbuf; we can do
8428 			 * this the easy way.
8429 			 */
8430 			struct tcphdr *th;
8431 
8432 			if (v4) {
8433 				struct ip *ip =
8434 				    (void *)(mtod(m0, char *) + offset);
8435 				th = (void *)(mtod(m0, char *) + hlen);
8436 
8437 				ip->ip_len = 0;
8438 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8439 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8440 			} else {
8441 				struct ip6_hdr *ip6 =
8442 				    (void *)(mtod(m0, char *) + offset);
8443 				th = (void *)(mtod(m0, char *) + hlen);
8444 
8445 				ip6->ip6_plen = 0;
8446 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8447 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8448 			}
8449 			tcp_hlen = th->th_off << 2;
8450 		}
8451 		hlen += tcp_hlen;
8452 		*cmdlenp |= NQTX_CMD_TSE;
8453 
8454 		if (v4) {
8455 			WM_Q_EVCNT_INCR(txq, tso);
8456 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8457 		} else {
8458 			WM_Q_EVCNT_INCR(txq, tso6);
8459 			*fieldsp |= NQTXD_FIELDS_TUXSM;
8460 		}
8461 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
8462 		    << NQTXD_FIELDS_PAYLEN_SHIFT);
8463 		KASSERT(((m0->m_pkthdr.len - hlen)
8464 		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8463 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8464 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8465 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8466 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8467 	} else {
8468 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8469 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8470 	}
8471 
8472 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8473 		*fieldsp |= NQTXD_FIELDS_IXSM;
8474 		cmdc |= NQTXC_CMD_IP4;
8475 	}
8476 
8477 	if (m0->m_pkthdr.csum_flags &
8478 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8479 		WM_Q_EVCNT_INCR(txq, tusum);
8480 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8481 			cmdc |= NQTXC_CMD_TCP;
8482 		else
8483 			cmdc |= NQTXC_CMD_UDP;
8484 
8485 		cmdc |= NQTXC_CMD_IP4;
8486 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8487 	}
8488 	if (m0->m_pkthdr.csum_flags &
8489 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8490 		WM_Q_EVCNT_INCR(txq, tusum6);
8491 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8492 			cmdc |= NQTXC_CMD_TCP;
8493 		else
8494 			cmdc |= NQTXC_CMD_UDP;
8495 
8496 		cmdc |= NQTXC_CMD_IP6;
8497 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8498 	}
8499 
8500 	/*
8501 	 * We don't have to write a context descriptor for every packet on
8502 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
8503 	 * I210 and I211. It is enough to write one per Tx queue for these
8504 	 * controllers.
8505 	 * Writing a context descriptor for every packet adds overhead, but
8506 	 * it does not cause problems.
8507 	 */
8508 	/* Fill in the context descriptor. */
8509 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8510 	    htole32(vl_len);
8511 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8512 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8513 	    htole32(cmdc);
8514 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8515 	    htole32(mssidx);
8516 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8517 	DPRINTF(sc, WM_DEBUG_TX,
8518 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8519 		txq->txq_next, 0, vl_len));
8520 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8521 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8522 	txs->txs_ndesc++;
8523 }
8524 
8525 /*
8526  * wm_nq_start:		[ifnet interface function]
8527  *
8528  *	Start packet transmission on the interface for NEWQUEUE devices.
8529  */
8530 static void
8531 wm_nq_start(struct ifnet *ifp)
8532 {
8533 	struct wm_softc *sc = ifp->if_softc;
8534 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8535 
8536 #ifdef WM_MPSAFE
8537 	KASSERT(if_is_mpsafe(ifp));
8538 #endif
8539 	/*
8540 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8541 	 */
8542 
8543 	mutex_enter(txq->txq_lock);
8544 	if (!txq->txq_stopping)
8545 		wm_nq_start_locked(ifp);
8546 	mutex_exit(txq->txq_lock);
8547 }
8548 
8549 static void
8550 wm_nq_start_locked(struct ifnet *ifp)
8551 {
8552 	struct wm_softc *sc = ifp->if_softc;
8553 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8554 
8555 	wm_nq_send_common_locked(ifp, txq, false);
8556 }
8557 
8558 static int
8559 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8560 {
8561 	int qid;
8562 	struct wm_softc *sc = ifp->if_softc;
8563 	struct wm_txqueue *txq;
8564 
8565 	qid = wm_select_txqueue(ifp, m);
8566 	txq = &sc->sc_queue[qid].wmq_txq;
8567 
8568 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8569 		m_freem(m);
8570 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8571 		return ENOBUFS;
8572 	}
8573 
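	/*
	 * Account for if_obytes and if_omcasts here; on the if_start path
	 * this accounting is done by if_transmit() (see wm_nq_start()).
	 */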
8574 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8575 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8576 	if (m->m_flags & M_MCAST)
8577 		if_statinc_ref(nsr, if_omcasts);
8578 	IF_STAT_PUTREF(ifp);
8579 
8580 	/*
8581 	 * This mutex_tryenter() can fail at run time in two situations:
8582 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
8583 	 *     (2) contention with the deferred if_start softint
8584 	 *	   (wm_handle_queue())
8585 	 * In case (1), the last packet enqueued to txq->txq_interq is
8586 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
8587 	 * In case (2), the last packet enqueued to txq->txq_interq is also
8588 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
8589 	 * either.
8590 	 */
8591 	if (mutex_tryenter(txq->txq_lock)) {
8592 		if (!txq->txq_stopping)
8593 			wm_nq_transmit_locked(ifp, txq);
8594 		mutex_exit(txq->txq_lock);
8595 	}
8596 
8597 	return 0;
8598 }
8599 
8600 static void
8601 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8602 {
8603 
8604 	wm_nq_send_common_locked(ifp, txq, true);
8605 }
8606 
8607 static void
8608 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8609     bool is_transmit)
8610 {
8611 	struct wm_softc *sc = ifp->if_softc;
8612 	struct mbuf *m0;
8613 	struct wm_txsoft *txs;
8614 	bus_dmamap_t dmamap;
8615 	int error, nexttx, lasttx = -1, seg, segs_needed;
8616 	bool do_csum, sent;
8617 	bool remap = true;
8618 
8619 	KASSERT(mutex_owned(txq->txq_lock));
8620 
8621 	if ((ifp->if_flags & IFF_RUNNING) == 0)
8622 		return;
8623 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8624 		return;
8625 
8626 	if (__predict_false(wm_linkdown_discard(txq))) {
8627 		do {
8628 			if (is_transmit)
8629 				m0 = pcq_get(txq->txq_interq);
8630 			else
8631 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8632 			/*
8633 			 * Increment the output packet counter even though the
8634 			 * packet is discarded because the PHY link is down.
8635 			 */
8636 			if (m0 != NULL)
8637 				if_statinc(ifp, if_opackets);
8638 			m_freem(m0);
8639 		} while (m0 != NULL);
8640 		return;
8641 	}
8642 
8643 	sent = false;
8644 
8645 	/*
8646 	 * Loop through the send queue, setting up transmit descriptors
8647 	 * until we drain the queue, or use up all available transmit
8648 	 * descriptors.
8649 	 */
8650 	for (;;) {
8651 		m0 = NULL;
8652 
8653 		/* Get a work queue entry. */
8654 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8655 			wm_txeof(txq, UINT_MAX);
8656 			if (txq->txq_sfree == 0) {
8657 				DPRINTF(sc, WM_DEBUG_TX,
8658 				    ("%s: TX: no free job descriptors\n",
8659 					device_xname(sc->sc_dev)));
8660 				WM_Q_EVCNT_INCR(txq, txsstall);
8661 				break;
8662 			}
8663 		}
8664 
8665 		/* Grab a packet off the queue. */
8666 		if (is_transmit)
8667 			m0 = pcq_get(txq->txq_interq);
8668 		else
8669 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8670 		if (m0 == NULL)
8671 			break;
8672 
8673 		DPRINTF(sc, WM_DEBUG_TX,
8674 		    ("%s: TX: have packet to transmit: %p\n",
8675 		    device_xname(sc->sc_dev), m0));
8676 
8677 		txs = &txq->txq_soft[txq->txq_snext];
8678 		dmamap = txs->txs_dmamap;
8679 
8680 		/*
8681 		 * Load the DMA map.  If this fails, the packet either
8682 		 * didn't fit in the allotted number of segments, or we
8683 		 * were short on resources.  For the too-many-segments
8684 		 * case, we simply report an error and drop the packet,
8685 		 * since we can't sanely copy a jumbo packet to a single
8686 		 * buffer.
8687 		 */
8688 retry:
8689 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8690 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8691 		if (__predict_false(error)) {
8692 			if (error == EFBIG) {
8693 				if (remap == true) {
8694 					struct mbuf *m;
8695 
8696 					remap = false;
8697 					m = m_defrag(m0, M_NOWAIT);
8698 					if (m != NULL) {
8699 						WM_Q_EVCNT_INCR(txq, defrag);
8700 						m0 = m;
8701 						goto retry;
8702 					}
8703 				}
8704 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8705 				log(LOG_ERR, "%s: Tx packet consumes too many "
8706 				    "DMA segments, dropping...\n",
8707 				    device_xname(sc->sc_dev));
8708 				wm_dump_mbuf_chain(sc, m0);
8709 				m_freem(m0);
8710 				continue;
8711 			}
8712 			/* Short on resources, just stop for now. */
8713 			DPRINTF(sc, WM_DEBUG_TX,
8714 			    ("%s: TX: dmamap load failed: %d\n",
8715 				device_xname(sc->sc_dev), error));
8716 			break;
8717 		}
8718 
8719 		segs_needed = dmamap->dm_nsegs;
8720 
8721 		/*
8722 		 * Ensure we have enough descriptors free to describe
8723 		 * the packet. Note, we always reserve one descriptor
8724 		 * at the end of the ring due to the semantics of the
8725 		 * TDT register, plus one more in the event we need
8726 		 * to load offload context.
8727 		 */
8728 		if (segs_needed > txq->txq_free - 2) {
8729 			/*
8730 			 * Not enough free descriptors to transmit this
8731 			 * packet.  We haven't committed anything yet,
8732 			 * so just unload the DMA map, put the packet
8733 			 * back on the queue, and punt. Notify the upper
8734 			 * layer that there are no more slots left.
8735 			 */
8736 			DPRINTF(sc, WM_DEBUG_TX,
8737 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8738 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8739 				segs_needed, txq->txq_free - 1));
8740 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8741 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8742 			WM_Q_EVCNT_INCR(txq, txdstall);
8743 			break;
8744 		}
8745 
8746 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8747 
8748 		DPRINTF(sc, WM_DEBUG_TX,
8749 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8750 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8751 
8752 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8753 
8754 		/*
8755 		 * Store a pointer to the packet so that we can free it
8756 		 * later.
8757 		 *
8758 		 * Initially, we consider the number of descriptors the
8759 		 * packet uses to be the number of DMA segments. This may be
8760 		 * incremented by 1 if we do checksum offload (a descriptor
8761 		 * is used to set the checksum context).
8762 		 */
8763 		txs->txs_mbuf = m0;
8764 		txs->txs_firstdesc = txq->txq_next;
8765 		txs->txs_ndesc = segs_needed;
8766 
8767 		/* Set up offload parameters for this packet. */
8768 		uint32_t cmdlen, fields, dcmdlen;
8769 		if (m0->m_pkthdr.csum_flags &
8770 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8771 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8772 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8773 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8774 			    &do_csum);
8775 		} else {
8776 			do_csum = false;
8777 			cmdlen = 0;
8778 			fields = 0;
8779 		}
8780 
8781 		/* Sync the DMA map. */
8782 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8783 		    BUS_DMASYNC_PREWRITE);
8784 
8785 		/* Initialize the first transmit descriptor. */
8786 		nexttx = txq->txq_next;
8787 		if (!do_csum) {
8788 			/* Setup a legacy descriptor */
8789 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8790 			    dmamap->dm_segs[0].ds_addr);
8791 			txq->txq_descs[nexttx].wtx_cmdlen =
8792 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8793 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8794 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8795 			if (vlan_has_tag(m0)) {
8796 				txq->txq_descs[nexttx].wtx_cmdlen |=
8797 				    htole32(WTX_CMD_VLE);
8798 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8799 				    htole16(vlan_get_tag(m0));
8800 			} else
8801 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
8802 
8803 			dcmdlen = 0;
8804 		} else {
8805 			/* Setup an advanced data descriptor */
8806 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8807 			    htole64(dmamap->dm_segs[0].ds_addr);
8808 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8809 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8810 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8811 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8812 			    htole32(fields);
8813 			DPRINTF(sc, WM_DEBUG_TX,
8814 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8815 				device_xname(sc->sc_dev), nexttx,
8816 				(uint64_t)dmamap->dm_segs[0].ds_addr));
8817 			DPRINTF(sc, WM_DEBUG_TX,
8818 			    ("\t 0x%08x%08x\n", fields,
8819 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8820 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8821 		}
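		/*
		 * Note that dcmdlen carries the descriptor-type bits that
		 * each subsequent segment descriptor must repeat: zero for
		 * the legacy format, NQTX_DTYP_D | NQTX_CMD_DEXT for the
		 * advanced format.
		 */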
8822 
8823 		lasttx = nexttx;
8824 		nexttx = WM_NEXTTX(txq, nexttx);
8825 		/*
8826 		 * Fill in the next descriptors. The legacy and advanced
8827 		 * formats are the same here.
8828 		 */
8829 		for (seg = 1; seg < dmamap->dm_nsegs;
8830 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8831 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8832 			    htole64(dmamap->dm_segs[seg].ds_addr);
8833 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8834 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8835 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8836 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8837 			lasttx = nexttx;
8838 
8839 			DPRINTF(sc, WM_DEBUG_TX,
8840 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8841 				device_xname(sc->sc_dev), nexttx,
8842 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
8843 				dmamap->dm_segs[seg].ds_len));
8844 		}
8845 
8846 		KASSERT(lasttx != -1);
8847 
8848 		/*
8849 		 * Set up the command byte on the last descriptor of
8850 		 * the packet. If we're in the interrupt delay window,
8851 		 * delay the interrupt.
8852 		 */
8853 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8854 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
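		/*
		 * The assertion above guarantees that EOP and RS occupy the
		 * same bit positions in both descriptor formats, so setting
		 * them through the legacy wtx_cmdlen view is safe here.
		 */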
8855 		txq->txq_descs[lasttx].wtx_cmdlen |=
8856 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8857 
8858 		txs->txs_lastdesc = lasttx;
8859 
8860 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8861 		    device_xname(sc->sc_dev),
8862 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8863 
8864 		/* Sync the descriptors we're using. */
8865 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8866 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8867 
8868 		/* Give the packet to the chip. */
8869 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8870 		sent = true;
8871 
8872 		DPRINTF(sc, WM_DEBUG_TX,
8873 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8874 
8875 		DPRINTF(sc, WM_DEBUG_TX,
8876 		    ("%s: TX: finished transmitting packet, job %d\n",
8877 			device_xname(sc->sc_dev), txq->txq_snext));
8878 
8879 		/* Advance the tx pointer. */
8880 		txq->txq_free -= txs->txs_ndesc;
8881 		txq->txq_next = nexttx;
8882 
8883 		txq->txq_sfree--;
8884 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8885 
8886 		/* Pass the packet to any BPF listeners. */
8887 		bpf_mtap(ifp, m0, BPF_D_OUT);
8888 	}
8889 
8890 	if (m0 != NULL) {
8891 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8892 		WM_Q_EVCNT_INCR(txq, descdrop);
8893 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8894 			__func__));
8895 		m_freem(m0);
8896 	}
8897 
8898 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8899 		/* No more slots; notify upper layer. */
8900 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8901 	}
8902 
8903 	if (sent) {
8904 		/* Set a watchdog timer in case the chip flakes out. */
8905 		txq->txq_lastsent = time_uptime;
8906 		txq->txq_sending = true;
8907 	}
8908 }
8909 
8910 static void
8911 wm_deferred_start_locked(struct wm_txqueue *txq)
8912 {
8913 	struct wm_softc *sc = txq->txq_sc;
8914 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8915 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8916 	int qid = wmq->wmq_id;
8917 
8918 	KASSERT(mutex_owned(txq->txq_lock));
8919 
8920 	if (txq->txq_stopping) {
8921 		mutex_exit(txq->txq_lock);
8922 		return;
8923 	}
8924 
8925 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8926 		/* XXX needed for ALTQ or single-CPU systems */
8927 		if (qid == 0)
8928 			wm_nq_start_locked(ifp);
8929 		wm_nq_transmit_locked(ifp, txq);
8930 	} else {
8931 		/* XXX needed for ALTQ or single-CPU systems */
8932 		if (qid == 0)
8933 			wm_start_locked(ifp);
8934 		wm_transmit_locked(ifp, txq);
8935 	}
8936 }
8937 
8938 /* Interrupt */
8939 
8940 /*
8941  * wm_txeof:
8942  *
8943  *	Helper; handle transmit interrupts.
8944  */
8945 static bool
8946 wm_txeof(struct wm_txqueue *txq, u_int limit)
8947 {
8948 	struct wm_softc *sc = txq->txq_sc;
8949 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8950 	struct wm_txsoft *txs;
8951 	int count = 0;
8952 	int i;
8953 	uint8_t status;
8954 	bool more = false;
8955 
8956 	KASSERT(mutex_owned(txq->txq_lock));
8957 
8958 	if (txq->txq_stopping)
8959 		return false;
8960 
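	/*
	 * We are about to reclaim completed descriptors, so clear the
	 * no-space condition; the send path sets it again if the ring is
	 * still exhausted.
	 */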
8961 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8962 
8963 	/*
8964 	 * Go through the Tx list and free mbufs for those
8965 	 * frames which have been transmitted.
8966 	 */
8967 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8968 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8969 		if (limit-- == 0) {
8970 			more = true;
8971 			DPRINTF(sc, WM_DEBUG_TX,
8972 			    ("%s: TX: loop limited, job %d is not processed\n",
8973 				device_xname(sc->sc_dev), i));
8974 			break;
8975 		}
8976 
8977 		txs = &txq->txq_soft[i];
8978 
8979 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8980 			device_xname(sc->sc_dev), i));
8981 
8982 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8983 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8984 
8985 		status =
8986 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8987 		if ((status & WTX_ST_DD) == 0) {
8988 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8989 			    BUS_DMASYNC_PREREAD);
8990 			break;
8991 		}
8992 
8993 		count++;
8994 		DPRINTF(sc, WM_DEBUG_TX,
8995 		    ("%s: TX: job %d done: descs %d..%d\n",
8996 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8997 		    txs->txs_lastdesc));
8998 
8999 		/*
9000 		 * XXX We should probably be using the statistics
9001 		 * XXX registers, but I don't know if they exist
9002 		 * XXX on chips before the i82544.
9003 		 */
9004 
9005 #ifdef WM_EVENT_COUNTERS
9006 		if (status & WTX_ST_TU)
9007 			WM_Q_EVCNT_INCR(txq, underrun);
9008 #endif /* WM_EVENT_COUNTERS */
9009 
9010 		/*
9011 		 * The docs for 82574 and newer say the status field has
9012 		 * neither EC (Excessive Collision) nor LC (Late Collision);
9013 		 * both bits are reserved. See the "PCIe GbE Controller Open
9014 		 * Source Software Developer's Manual", the 82574 datasheet
9015 		 * and newer.
9016 		 *
9017 		 * XXX I saw the LC bit set on an I218 even though the media
9018 		 * was full duplex, so the bit might have some other meaning
9019 		 * ... (I have no documentation for it).
9020 		 */
9020 
9021 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
9022 		    && ((sc->sc_type < WM_T_82574)
9023 			|| (sc->sc_type == WM_T_80003))) {
9024 			if_statinc(ifp, if_oerrors);
9025 			if (status & WTX_ST_LC)
9026 				log(LOG_WARNING, "%s: late collision\n",
9027 				    device_xname(sc->sc_dev));
9028 			else if (status & WTX_ST_EC) {
9029 				if_statadd(ifp, if_collisions,
9030 				    TX_COLLISION_THRESHOLD + 1);
9031 				log(LOG_WARNING, "%s: excessive collisions\n",
9032 				    device_xname(sc->sc_dev));
9033 			}
9034 		} else
9035 			if_statinc(ifp, if_opackets);
9036 
9037 		txq->txq_packets++;
9038 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9039 
9040 		txq->txq_free += txs->txs_ndesc;
9041 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
9042 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
9043 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
9044 		m_freem(txs->txs_mbuf);
9045 		txs->txs_mbuf = NULL;
9046 	}
9047 
9048 	/* Update the dirty transmit buffer pointer. */
9049 	txq->txq_sdirty = i;
9050 	DPRINTF(sc, WM_DEBUG_TX,
9051 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
9052 
9053 	if (count != 0)
9054 		rnd_add_uint32(&sc->rnd_source, count);
9055 
9056 	/*
9057 	 * If there are no more pending transmissions, cancel the watchdog
9058 	 * timer.
9059 	 */
9060 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9061 		txq->txq_sending = false;
9062 
9063 	return more;
9064 }
9065 
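/*
 * The wm_rxdesc_* accessors below hide the differences between the three Rx
 * descriptor formats handled by this driver: the legacy format, the 82574
 * extended format and the NEWQUEUE (82575 and newer) advanced format.
 */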
9066 static inline uint32_t
9067 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
9068 {
9069 	struct wm_softc *sc = rxq->rxq_sc;
9070 
9071 	if (sc->sc_type == WM_T_82574)
9072 		return EXTRXC_STATUS(
9073 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9074 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9075 		return NQRXC_STATUS(
9076 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9077 	else
9078 		return rxq->rxq_descs[idx].wrx_status;
9079 }
9080 
9081 static inline uint32_t
9082 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
9083 {
9084 	struct wm_softc *sc = rxq->rxq_sc;
9085 
9086 	if (sc->sc_type == WM_T_82574)
9087 		return EXTRXC_ERROR(
9088 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
9089 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9090 		return NQRXC_ERROR(
9091 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
9092 	else
9093 		return rxq->rxq_descs[idx].wrx_errors;
9094 }
9095 
9096 static inline uint16_t
9097 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
9098 {
9099 	struct wm_softc *sc = rxq->rxq_sc;
9100 
9101 	if (sc->sc_type == WM_T_82574)
9102 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
9103 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9104 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
9105 	else
9106 		return rxq->rxq_descs[idx].wrx_special;
9107 }
9108 
9109 static inline int
9110 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
9111 {
9112 	struct wm_softc *sc = rxq->rxq_sc;
9113 
9114 	if (sc->sc_type == WM_T_82574)
9115 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
9116 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9117 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
9118 	else
9119 		return rxq->rxq_descs[idx].wrx_len;
9120 }
9121 
9122 #ifdef WM_DEBUG
9123 static inline uint32_t
9124 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9125 {
9126 	struct wm_softc *sc = rxq->rxq_sc;
9127 
9128 	if (sc->sc_type == WM_T_82574)
9129 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9130 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9131 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9132 	else
9133 		return 0;
9134 }
9135 
9136 static inline uint8_t
9137 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9138 {
9139 	struct wm_softc *sc = rxq->rxq_sc;
9140 
9141 	if (sc->sc_type == WM_T_82574)
9142 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9143 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9144 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9145 	else
9146 		return 0;
9147 }
9148 #endif /* WM_DEBUG */
9149 
9150 static inline bool
9151 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9152     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9153 {
9154 
9155 	if (sc->sc_type == WM_T_82574)
9156 		return (status & ext_bit) != 0;
9157 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9158 		return (status & nq_bit) != 0;
9159 	else
9160 		return (status & legacy_bit) != 0;
9161 }
9162 
9163 static inline bool
9164 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9165     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9166 {
9167 
9168 	if (sc->sc_type == WM_T_82574)
9169 		return (error & ext_bit) != 0;
9170 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9171 		return (error & nq_bit) != 0;
9172 	else
9173 		return (error & legacy_bit) != 0;
9174 }
9175 
9176 static inline bool
9177 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9178 {
9179 
9180 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9181 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9182 		return true;
9183 	else
9184 		return false;
9185 }
9186 
9187 static inline bool
9188 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9189 {
9190 	struct wm_softc *sc = rxq->rxq_sc;
9191 
9192 	/* XXX missing error bit for newqueue? */
9193 	if (wm_rxdesc_is_set_error(sc, errors,
9194 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9195 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9196 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9197 		NQRXC_ERROR_RXE)) {
9198 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9199 		    EXTRXC_ERROR_SE, 0))
9200 			log(LOG_WARNING, "%s: symbol error\n",
9201 			    device_xname(sc->sc_dev));
9202 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9203 		    EXTRXC_ERROR_SEQ, 0))
9204 			log(LOG_WARNING, "%s: receive sequence error\n",
9205 			    device_xname(sc->sc_dev));
9206 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9207 		    EXTRXC_ERROR_CE, 0))
9208 			log(LOG_WARNING, "%s: CRC error\n",
9209 			    device_xname(sc->sc_dev));
9210 		return true;
9211 	}
9212 
9213 	return false;
9214 }
9215 
9216 static inline bool
9217 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9218 {
9219 	struct wm_softc *sc = rxq->rxq_sc;
9220 
9221 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9222 		NQRXC_STATUS_DD)) {
9223 		/* We have processed all of the receive descriptors. */
9224 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9225 		return false;
9226 	}
9227 
9228 	return true;
9229 }
9230 
9231 static inline bool
9232 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9233     uint16_t vlantag, struct mbuf *m)
9234 {
9235 
9236 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9237 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9238 		vlan_set_tag(m, le16toh(vlantag));
9239 	}
9240 
9241 	return true;
9242 }
9243 
9244 static inline void
9245 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9246     uint32_t errors, struct mbuf *m)
9247 {
9248 	struct wm_softc *sc = rxq->rxq_sc;
9249 
9250 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9251 		if (wm_rxdesc_is_set_status(sc, status,
9252 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9253 			WM_Q_EVCNT_INCR(rxq, ipsum);
9254 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9255 			if (wm_rxdesc_is_set_error(sc, errors,
9256 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9257 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9258 		}
9259 		if (wm_rxdesc_is_set_status(sc, status,
9260 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9261 			/*
9262 			 * Note: we don't know if this was TCP or UDP,
9263 			 * so we just set both bits, and expect the
9264 			 * upper layers to deal.
9265 			 */
9266 			WM_Q_EVCNT_INCR(rxq, tusum);
9267 			m->m_pkthdr.csum_flags |=
9268 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9269 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
9270 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9271 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9272 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9273 		}
9274 	}
9275 }
9276 
9277 /*
9278  * wm_rxeof:
9279  *
9280  *	Helper; handle receive interrupts.
9281  */
9282 static bool
9283 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9284 {
9285 	struct wm_softc *sc = rxq->rxq_sc;
9286 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9287 	struct wm_rxsoft *rxs;
9288 	struct mbuf *m;
9289 	int i, len;
9290 	int count = 0;
9291 	uint32_t status, errors;
9292 	uint16_t vlantag;
9293 	bool more = false;
9294 
9295 	KASSERT(mutex_owned(rxq->rxq_lock));
9296 
9297 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9298 		if (limit-- == 0) {
9299 			more = true;
9300 			DPRINTF(sc, WM_DEBUG_RX,
9301 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
9302 				device_xname(sc->sc_dev), i));
9303 			break;
9304 		}
9305 
9306 		rxs = &rxq->rxq_soft[i];
9307 
9308 		DPRINTF(sc, WM_DEBUG_RX,
9309 		    ("%s: RX: checking descriptor %d\n",
9310 			device_xname(sc->sc_dev), i));
9311 		wm_cdrxsync(rxq, i,
9312 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9313 
9314 		status = wm_rxdesc_get_status(rxq, i);
9315 		errors = wm_rxdesc_get_errors(rxq, i);
9316 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9317 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
9318 #ifdef WM_DEBUG
9319 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9320 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9321 #endif
9322 
9323 		if (!wm_rxdesc_dd(rxq, i, status)) {
9324 			break;
9325 		}
9326 
9327 		count++;
9328 		if (__predict_false(rxq->rxq_discard)) {
9329 			DPRINTF(sc, WM_DEBUG_RX,
9330 			    ("%s: RX: discarding contents of descriptor %d\n",
9331 				device_xname(sc->sc_dev), i));
9332 			wm_init_rxdesc(rxq, i);
9333 			if (wm_rxdesc_is_eop(rxq, status)) {
9334 				/* Reset our state. */
9335 				DPRINTF(sc, WM_DEBUG_RX,
9336 				    ("%s: RX: resetting rxdiscard -> 0\n",
9337 					device_xname(sc->sc_dev)));
9338 				rxq->rxq_discard = 0;
9339 			}
9340 			continue;
9341 		}
9342 
9343 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9344 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9345 
9346 		m = rxs->rxs_mbuf;
9347 
9348 		/*
9349 		 * Add a new receive buffer to the ring, unless of
9350 		 * course the length is zero. Treat the latter as a
9351 		 * failed mapping.
9352 		 */
9353 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9354 			/*
9355 			 * Failed, throw away what we've done so
9356 			 * far, and discard the rest of the packet.
9357 			 */
9358 			if_statinc(ifp, if_ierrors);
9359 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9360 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9361 			wm_init_rxdesc(rxq, i);
9362 			if (!wm_rxdesc_is_eop(rxq, status))
9363 				rxq->rxq_discard = 1;
9364 			if (rxq->rxq_head != NULL)
9365 				m_freem(rxq->rxq_head);
9366 			WM_RXCHAIN_RESET(rxq);
9367 			DPRINTF(sc, WM_DEBUG_RX,
9368 			    ("%s: RX: Rx buffer allocation failed, "
9369 			    "dropping packet%s\n", device_xname(sc->sc_dev),
9370 				rxq->rxq_discard ? " (discard)" : ""));
9371 			continue;
9372 		}
9373 
9374 		m->m_len = len;
9375 		rxq->rxq_len += len;
9376 		DPRINTF(sc, WM_DEBUG_RX,
9377 		    ("%s: RX: buffer at %p len %d\n",
9378 			device_xname(sc->sc_dev), m->m_data, len));
9379 
9380 		/* If this is not the end of the packet, keep looking. */
9381 		if (!wm_rxdesc_is_eop(rxq, status)) {
9382 			WM_RXCHAIN_LINK(rxq, m);
9383 			DPRINTF(sc, WM_DEBUG_RX,
9384 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
9385 				device_xname(sc->sc_dev), rxq->rxq_len));
9386 			continue;
9387 		}
9388 
9389 		/*
9390 		 * Okay, we have the entire packet now. The chip is configured
9391 		 * to include the FCS (not all chips can be configured to
9392 		 * strip it), so we normally need to trim it, except on
9393 		 * I35[04] and I21[01]: those have an erratum whereby the
9394 		 * RCTL_SECRC bit in RCTL is always set, so the FCS is already
9395 		 * stripped and we don't trim it. PCH2 and newer also omit the
9396 		 * FCS when jumbo frames are used, to work around an erratum.
9397 		 * We may need to adjust the length of the previous mbuf in
9398 		 * the chain if the current mbuf is too short.
9399 		 */
9400 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9401 			if (m->m_len < ETHER_CRC_LEN) {
9402 				rxq->rxq_tail->m_len
9403 				    -= (ETHER_CRC_LEN - m->m_len);
9404 				m->m_len = 0;
9405 			} else
9406 				m->m_len -= ETHER_CRC_LEN;
9407 			len = rxq->rxq_len - ETHER_CRC_LEN;
9408 		} else
9409 			len = rxq->rxq_len;
9410 
9411 		WM_RXCHAIN_LINK(rxq, m);
9412 
9413 		*rxq->rxq_tailp = NULL;
9414 		m = rxq->rxq_head;
9415 
9416 		WM_RXCHAIN_RESET(rxq);
9417 
9418 		DPRINTF(sc, WM_DEBUG_RX,
9419 		    ("%s: RX: have entire packet, len -> %d\n",
9420 			device_xname(sc->sc_dev), len));
9421 
9422 		/* If an error occurred, update stats and drop the packet. */
9423 		if (wm_rxdesc_has_errors(rxq, errors)) {
9424 			m_freem(m);
9425 			continue;
9426 		}
9427 
9428 		/* No errors.  Receive the packet. */
9429 		m_set_rcvif(m, ifp);
9430 		m->m_pkthdr.len = len;
9431 		/*
9432 		 * TODO:
9433 		 * We should save rsshash and rsstype in this mbuf.
9434 		 */
9435 		DPRINTF(sc, WM_DEBUG_RX,
9436 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9437 			device_xname(sc->sc_dev), rsstype, rsshash));
9438 
9439 		/*
9440 		 * If VLANs are enabled, VLAN packets have been unwrapped
9441 		 * for us.  Associate the tag with the packet.
9442 		 */
9443 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9444 			continue;
9445 
9446 		/* Set up checksum info for this packet. */
9447 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9448 
9449 		rxq->rxq_packets++;
9450 		rxq->rxq_bytes += len;
9451 		/* Pass it on. */
9452 		if_percpuq_enqueue(sc->sc_ipq, m);
9453 
9454 		if (rxq->rxq_stopping)
9455 			break;
9456 	}
9457 	rxq->rxq_ptr = i;
9458 
9459 	if (count != 0)
9460 		rnd_add_uint32(&sc->rnd_source, count);
9461 
9462 	DPRINTF(sc, WM_DEBUG_RX,
9463 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9464 
9465 	return more;
9466 }
9467 
9468 /*
9469  * wm_linkintr_gmii:
9470  *
9471  *	Helper; handle link interrupts for GMII.
9472  */
9473 static void
9474 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9475 {
9476 	device_t dev = sc->sc_dev;
9477 	uint32_t status, reg;
9478 	bool link;
9479 	int rv;
9480 
9481 	KASSERT(WM_CORE_LOCKED(sc));
9482 
9483 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9484 		__func__));
9485 
9486 	if ((icr & ICR_LSC) == 0) {
9487 		if (icr & ICR_RXSEQ)
9488 			DPRINTF(sc, WM_DEBUG_LINK,
9489 			    ("%s: LINK Receive sequence error\n",
9490 				device_xname(dev)));
9491 		return;
9492 	}
9493 
9494 	/* Link status changed */
9495 	status = CSR_READ(sc, WMREG_STATUS);
9496 	link = status & STATUS_LU;
9497 	if (link) {
9498 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9499 			device_xname(dev),
9500 			(status & STATUS_FD) ? "FDX" : "HDX"));
9501 		if (wm_phy_need_linkdown_discard(sc)) {
9502 			DPRINTF(sc, WM_DEBUG_LINK,
9503 			    ("%s: linkintr: Clear linkdown discard flag\n",
9504 				device_xname(dev)));
9505 			wm_clear_linkdown_discard(sc);
9506 		}
9507 	} else {
9508 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9509 			device_xname(dev)));
9510 		if (wm_phy_need_linkdown_discard(sc)) {
9511 			DPRINTF(sc, WM_DEBUG_LINK,
9512 			    ("%s: linkintr: Set linkdown discard flag\n",
9513 				device_xname(dev)));
9514 			wm_set_linkdown_discard(sc);
9515 		}
9516 	}
9517 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
9518 		wm_gig_downshift_workaround_ich8lan(sc);
9519 
9520 	if ((sc->sc_type == WM_T_ICH8)
9521 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
9522 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
9523 	}
9524 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9525 		device_xname(dev)));
9526 	mii_pollstat(&sc->sc_mii);
9527 	if (sc->sc_type == WM_T_82543) {
9528 		int miistatus, active;
9529 
9530 		/*
9531 		 * With 82543, we need to force speed and
9532 		 * duplex on the MAC equal to what the PHY
9533 		 * speed and duplex configuration is.
9534 		 */
9535 		miistatus = sc->sc_mii.mii_media_status;
9536 
9537 		if (miistatus & IFM_ACTIVE) {
9538 			active = sc->sc_mii.mii_media_active;
9539 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9540 			switch (IFM_SUBTYPE(active)) {
9541 			case IFM_10_T:
9542 				sc->sc_ctrl |= CTRL_SPEED_10;
9543 				break;
9544 			case IFM_100_TX:
9545 				sc->sc_ctrl |= CTRL_SPEED_100;
9546 				break;
9547 			case IFM_1000_T:
9548 				sc->sc_ctrl |= CTRL_SPEED_1000;
9549 				break;
9550 			default:
9551 				/*
9552 				 * Fiber?
9553 				 * Should not enter here.
9554 				 */
9555 				device_printf(dev, "unknown media (%x)\n",
9556 				    active);
9557 				break;
9558 			}
9559 			if (active & IFM_FDX)
9560 				sc->sc_ctrl |= CTRL_FD;
9561 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9562 		}
9563 	} else if (sc->sc_type == WM_T_PCH) {
9564 		wm_k1_gig_workaround_hv(sc,
9565 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9566 	}
9567 
9568 	/*
9569 	 * When connected at 10Mbps half-duplex, some parts are excessively
9570 	 * aggressive resulting in many collisions. To avoid this, increase
9571 	 * the IPG and reduce Rx latency in the PHY.
9572 	 */
9573 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9574 	    && link) {
9575 		uint32_t tipg_reg;
9576 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9577 		bool fdx;
9578 		uint16_t emi_addr, emi_val;
9579 
9580 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
9581 		tipg_reg &= ~TIPG_IPGT_MASK;
9582 		fdx = status & STATUS_FD;
9583 
9584 		if (!fdx && (speed == STATUS_SPEED_10)) {
9585 			tipg_reg |= 0xff;
9586 			/* Reduce Rx latency in analog PHY */
9587 			emi_val = 0;
9588 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9589 		    fdx && speed != STATUS_SPEED_1000) {
9590 			tipg_reg |= 0xc;
9591 			emi_val = 1;
9592 		} else {
9593 			/* Roll back the default values */
9594 			tipg_reg |= 0x08;
9595 			emi_val = 1;
9596 		}
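		/*
		 * The IPGT values above (0xff, 0xc and 0x08) and the matching
		 * EMI Rx-latency settings appear to mirror Intel's e1000e
		 * driver for these PCH variants.
		 */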
9597 
9598 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9599 
9600 		rv = sc->phy.acquire(sc);
9601 		if (rv)
9602 			return;
9603 
9604 		if (sc->sc_type == WM_T_PCH2)
9605 			emi_addr = I82579_RX_CONFIG;
9606 		else
9607 			emi_addr = I217_RX_CONFIG;
9608 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9609 
9610 		if (sc->sc_type >= WM_T_PCH_LPT) {
9611 			uint16_t phy_reg;
9612 
9613 			sc->phy.readreg_locked(dev, 2,
9614 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
9615 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9616 			if (speed == STATUS_SPEED_100
9617 			    || speed == STATUS_SPEED_10)
9618 				phy_reg |= 0x3e8;
9619 			else
9620 				phy_reg |= 0xfa;
9621 			sc->phy.writereg_locked(dev, 2,
9622 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
9623 
9624 			if (speed == STATUS_SPEED_1000) {
9625 				sc->phy.readreg_locked(dev, 2,
9626 				    HV_PM_CTRL, &phy_reg);
9627 
9628 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9629 
9630 				sc->phy.writereg_locked(dev, 2,
9631 				    HV_PM_CTRL, phy_reg);
9632 			}
9633 		}
9634 		sc->phy.release(sc);
9635 
9636 		if (rv)
9637 			return;
9638 
9639 		if (sc->sc_type >= WM_T_PCH_SPT) {
9640 			uint16_t data, ptr_gap;
9641 
9642 			if (speed == STATUS_SPEED_1000) {
9643 				rv = sc->phy.acquire(sc);
9644 				if (rv)
9645 					return;
9646 
9647 				rv = sc->phy.readreg_locked(dev, 2,
9648 				    I82579_UNKNOWN1, &data);
9649 				if (rv) {
9650 					sc->phy.release(sc);
9651 					return;
9652 				}
9653 
9654 				ptr_gap = (data & (0x3ff << 2)) >> 2;
9655 				if (ptr_gap < 0x18) {
9656 					data &= ~(0x3ff << 2);
9657 					data |= (0x18 << 2);
9658 					rv = sc->phy.writereg_locked(dev,
9659 					    2, I82579_UNKNOWN1, data);
9660 				}
9661 				sc->phy.release(sc);
9662 				if (rv)
9663 					return;
9664 			} else {
9665 				rv = sc->phy.acquire(sc);
9666 				if (rv)
9667 					return;
9668 
9669 				rv = sc->phy.writereg_locked(dev, 2,
9670 				    I82579_UNKNOWN1, 0xc023);
9671 				sc->phy.release(sc);
9672 				if (rv)
9673 					return;
9674 
9675 			}
9676 		}
9677 	}
9678 
9679 	/*
9680 	 * I217 Packet Loss issue:
9681 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
9682 	 * on power up.
9683 	 * Set the Beacon Duration for I217 to 8 usec
9684 	 */
9685 	if (sc->sc_type >= WM_T_PCH_LPT) {
9686 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
9687 		reg &= ~FEXTNVM4_BEACON_DURATION;
9688 		reg |= FEXTNVM4_BEACON_DURATION_8US;
9689 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9690 	}
9691 
9692 	/* Work-around I218 hang issue */
9693 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9694 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9695 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9696 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9697 		wm_k1_workaround_lpt_lp(sc, link);
9698 
9699 	if (sc->sc_type >= WM_T_PCH_LPT) {
9700 		/*
9701 		 * Set platform power management values for Latency
9702 		 * Tolerance Reporting (LTR)
9703 		 */
9704 		wm_platform_pm_pch_lpt(sc,
9705 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9706 	}
9707 
9708 	/* Clear link partner's EEE ability */
9709 	sc->eee_lp_ability = 0;
9710 
9711 	/* FEXTNVM6 K1-off workaround */
9712 	if (sc->sc_type == WM_T_PCH_SPT) {
9713 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
9714 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9715 			reg |= FEXTNVM6_K1_OFF_ENABLE;
9716 		else
9717 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9718 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9719 	}
9720 
9721 	if (!link)
9722 		return;
9723 
9724 	switch (sc->sc_type) {
9725 	case WM_T_PCH2:
9726 		wm_k1_workaround_lv(sc);
9727 		/* FALLTHROUGH */
9728 	case WM_T_PCH:
9729 		if (sc->sc_phytype == WMPHY_82578)
9730 			wm_link_stall_workaround_hv(sc);
9731 		break;
9732 	default:
9733 		break;
9734 	}
9735 
9736 	/* Enable/Disable EEE after link up */
9737 	if (sc->sc_phytype > WMPHY_82579)
9738 		wm_set_eee_pchlan(sc);
9739 }
9740 
9741 /*
9742  * wm_linkintr_tbi:
9743  *
9744  *	Helper; handle link interrupts for TBI mode.
9745  */
9746 static void
9747 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9748 {
9749 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9750 	uint32_t status;
9751 
9752 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9753 		__func__));
9754 
9755 	status = CSR_READ(sc, WMREG_STATUS);
9756 	if (icr & ICR_LSC) {
9757 		wm_check_for_link(sc);
9758 		if (status & STATUS_LU) {
9759 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9760 				device_xname(sc->sc_dev),
9761 				(status & STATUS_FD) ? "FDX" : "HDX"));
9762 			/*
9763 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9764 			 * so we should update sc->sc_ctrl
9765 			 */
9766 
9767 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9768 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9769 			sc->sc_fcrtl &= ~FCRTL_XONE;
9770 			if (status & STATUS_FD)
9771 				sc->sc_tctl |=
9772 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9773 			else
9774 				sc->sc_tctl |=
9775 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9776 			if (sc->sc_ctrl & CTRL_TFCE)
9777 				sc->sc_fcrtl |= FCRTL_XONE;
9778 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9779 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9780 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9781 			sc->sc_tbi_linkup = 1;
9782 			if_link_state_change(ifp, LINK_STATE_UP);
9783 		} else {
9784 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9785 				device_xname(sc->sc_dev)));
9786 			sc->sc_tbi_linkup = 0;
9787 			if_link_state_change(ifp, LINK_STATE_DOWN);
9788 		}
9789 		/* Update LED */
9790 		wm_tbi_serdes_set_linkled(sc);
9791 	} else if (icr & ICR_RXSEQ)
9792 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9793 			device_xname(sc->sc_dev)));
9794 }
9795 
9796 /*
9797  * wm_linkintr_serdes:
9798  *
9799  *	Helper; handle link interrupts for SERDES mode.
9800  */
9801 static void
9802 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9803 {
9804 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9805 	struct mii_data *mii = &sc->sc_mii;
9806 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9807 	uint32_t pcs_adv, pcs_lpab, reg;
9808 
9809 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9810 		__func__));
9811 
9812 	if (icr & ICR_LSC) {
9813 		/* Check PCS */
9814 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9815 		if ((reg & PCS_LSTS_LINKOK) != 0) {
9816 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9817 				device_xname(sc->sc_dev)));
9818 			mii->mii_media_status |= IFM_ACTIVE;
9819 			sc->sc_tbi_linkup = 1;
9820 			if_link_state_change(ifp, LINK_STATE_UP);
9821 		} else {
9822 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9823 				device_xname(sc->sc_dev)));
9824 			mii->mii_media_status |= IFM_NONE;
9825 			sc->sc_tbi_linkup = 0;
9826 			if_link_state_change(ifp, LINK_STATE_DOWN);
9827 			wm_tbi_serdes_set_linkled(sc);
9828 			return;
9829 		}
9830 		mii->mii_media_active |= IFM_1000_SX;
9831 		if ((reg & PCS_LSTS_FDX) != 0)
9832 			mii->mii_media_active |= IFM_FDX;
9833 		else
9834 			mii->mii_media_active |= IFM_HDX;
9835 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9836 			/* Check flow */
9837 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
9838 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
9839 				DPRINTF(sc, WM_DEBUG_LINK,
9840 				    ("XXX LINKOK but not ACOMP\n"));
9841 				return;
9842 			}
9843 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9844 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9845 			DPRINTF(sc, WM_DEBUG_LINK,
9846 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9847 			if ((pcs_adv & TXCW_SYM_PAUSE)
9848 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9849 				mii->mii_media_active |= IFM_FLOW
9850 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9851 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9852 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9853 			    && (pcs_lpab & TXCW_SYM_PAUSE)
9854 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9855 				mii->mii_media_active |= IFM_FLOW
9856 				    | IFM_ETH_TXPAUSE;
9857 			else if ((pcs_adv & TXCW_SYM_PAUSE)
9858 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9859 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9860 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9861 				mii->mii_media_active |= IFM_FLOW
9862 				    | IFM_ETH_RXPAUSE;
9863 		}
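		/*
		 * The pause-bit resolution above follows the priority table
		 * of IEEE 802.3 Annex 28B.
		 */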
9864 		/* Update LED */
9865 		wm_tbi_serdes_set_linkled(sc);
9866 	} else
9867 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9868 		    device_xname(sc->sc_dev)));
9869 }
9870 
9871 /*
9872  * wm_linkintr:
9873  *
9874  *	Helper; handle link interrupts.
9875  */
9876 static void
9877 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9878 {
9879 
9880 	KASSERT(WM_CORE_LOCKED(sc));
9881 
9882 	if (sc->sc_flags & WM_F_HAS_MII)
9883 		wm_linkintr_gmii(sc, icr);
9884 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9885 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9886 		wm_linkintr_serdes(sc, icr);
9887 	else
9888 		wm_linkintr_tbi(sc, icr);
9889 }
9890 
9891 
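/*
 * Defer further Tx/Rx processing for a queue to either a workqueue or a
 * softint, depending on how the queue has been configured.
 */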
9892 static inline void
9893 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9894 {
9895 
9896 	if (wmq->wmq_txrx_use_workqueue)
9897 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9898 	else
9899 		softint_schedule(wmq->wmq_si);
9900 }
9901 
9902 static inline void
9903 wm_legacy_intr_disable(struct wm_softc *sc)
9904 {
9905 
9906 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
9907 }
9908 
9909 static inline void
9910 wm_legacy_intr_enable(struct wm_softc *sc)
9911 {
9912 
9913 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
9914 }
9915 
9916 /*
9917  * wm_intr_legacy:
9918  *
9919  *	Interrupt service routine for INTx and MSI.
9920  */
9921 static int
9922 wm_intr_legacy(void *arg)
9923 {
9924 	struct wm_softc *sc = arg;
9925 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9926 	struct wm_queue *wmq = &sc->sc_queue[0];
9927 	struct wm_txqueue *txq = &wmq->wmq_txq;
9928 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9929 	u_int txlimit = sc->sc_tx_intr_process_limit;
9930 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9931 	uint32_t icr, rndval = 0;
9932 	bool more = false;
9933 
9934 	icr = CSR_READ(sc, WMREG_ICR);
9935 	if ((icr & sc->sc_icr) == 0)
9936 		return 0;
9937 
9938 	DPRINTF(sc, WM_DEBUG_TX,
9939 	    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
9940 	if (rndval == 0)
9941 		rndval = icr;
9942 
9943 	mutex_enter(rxq->rxq_lock);
9944 
9945 	if (rxq->rxq_stopping) {
9946 		mutex_exit(rxq->rxq_lock);
9947 		return 1;
9948 	}
9949 
9950 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9951 	if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9952 		DPRINTF(sc, WM_DEBUG_RX,
9953 		    ("%s: RX: got Rx intr 0x%08x\n",
9954 			device_xname(sc->sc_dev),
9955 			icr & (uint32_t)(ICR_RXDMT0 | ICR_RXT0)));
9956 		WM_Q_EVCNT_INCR(rxq, intr);
9957 	}
9958 #endif
9959 	/*
9960 	 * wm_rxeof() does *not* call upper layer functions directly,
9961 	 * because if_percpuq_enqueue() just calls softint_schedule(),
9962 	 * so we can call wm_rxeof() in interrupt context.
9963 	 */
9964 	more = wm_rxeof(rxq, rxlimit);
9965 
9966 	mutex_exit(rxq->rxq_lock);
9967 	mutex_enter(txq->txq_lock);
9968 
9969 	if (txq->txq_stopping) {
9970 		mutex_exit(txq->txq_lock);
9971 		return 1;
9972 	}
9973 
9974 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9975 	if (icr & ICR_TXDW) {
9976 		DPRINTF(sc, WM_DEBUG_TX,
9977 		    ("%s: TX: got TXDW interrupt\n",
9978 			device_xname(sc->sc_dev)));
9979 		WM_Q_EVCNT_INCR(txq, txdw);
9980 	}
9981 #endif
9982 	more |= wm_txeof(txq, txlimit);
9983 	if (!IF_IS_EMPTY(&ifp->if_snd))
9984 		more = true;
9985 
9986 	mutex_exit(txq->txq_lock);
9987 	WM_CORE_LOCK(sc);
9988 
9989 	if (sc->sc_core_stopping) {
9990 		WM_CORE_UNLOCK(sc);
9991 		return 1;
9992 	}
9993 
9994 	if (icr & (ICR_LSC | ICR_RXSEQ)) {
9995 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9996 		wm_linkintr(sc, icr);
9997 	}
9998 	if ((icr & ICR_GPI(0)) != 0)
9999 		device_printf(sc->sc_dev, "got module interrupt\n");
10000 
10001 	WM_CORE_UNLOCK(sc);
10002 
10003 	if (icr & ICR_RXO) {
10004 #if defined(WM_DEBUG)
10005 		log(LOG_WARNING, "%s: Receive overrun\n",
10006 		    device_xname(sc->sc_dev));
10007 #endif /* defined(WM_DEBUG) */
10008 	}
10009 
10010 	rnd_add_uint32(&sc->rnd_source, rndval);
10011 
10012 	if (more) {
10013 		/* Try to get more packets going. */
10014 		wm_legacy_intr_disable(sc);
10015 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10016 		wm_sched_handle_queue(sc, wmq);
10017 	}
10018 
10019 	return 1;
10020 }
10021 
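/*
 * Per-queue interrupt masking differs by generation: the 82574 masks its
 * TXQ/RXQ interrupt causes through IMC/IMS, the 82575 uses the EITR_*_QUEUE
 * bits in EIMC/EIMS, and later NEWQUEUE chips use one EIMC/EIMS bit per
 * MSI-X vector.
 */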
10022 static inline void
10023 wm_txrxintr_disable(struct wm_queue *wmq)
10024 {
10025 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10026 
10027 	if (__predict_false(!wm_is_using_msix(sc))) {
10028 		return wm_legacy_intr_disable(sc);
10029 	}
10030 
10031 	if (sc->sc_type == WM_T_82574)
10032 		CSR_WRITE(sc, WMREG_IMC,
10033 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
10034 	else if (sc->sc_type == WM_T_82575)
10035 		CSR_WRITE(sc, WMREG_EIMC,
10036 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10037 	else
10038 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
10039 }
10040 
10041 static inline void
10042 wm_txrxintr_enable(struct wm_queue *wmq)
10043 {
10044 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
10045 
10046 	wm_itrs_calculate(sc, wmq);
10047 
10048 	if (__predict_false(!wm_is_using_msix(sc))) {
10049 		return wm_legacy_intr_enable(sc);
10050 	}
10051 
10052 	/*
10053 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
10054 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
10055 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
10056 	 * its wm_handle_queue(wmq) is running.
10057 	 */
10058 	if (sc->sc_type == WM_T_82574)
10059 		CSR_WRITE(sc, WMREG_IMS,
10060 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
10061 	else if (sc->sc_type == WM_T_82575)
10062 		CSR_WRITE(sc, WMREG_EIMS,
10063 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
10064 	else
10065 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
10066 }
10067 
10068 static int
10069 wm_txrxintr_msix(void *arg)
10070 {
10071 	struct wm_queue *wmq = arg;
10072 	struct wm_txqueue *txq = &wmq->wmq_txq;
10073 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10074 	struct wm_softc *sc = txq->txq_sc;
10075 	u_int txlimit = sc->sc_tx_intr_process_limit;
10076 	u_int rxlimit = sc->sc_rx_intr_process_limit;
10077 	bool txmore;
10078 	bool rxmore;
10079 
10080 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
10081 
10082 	DPRINTF(sc, WM_DEBUG_TX,
10083 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
10084 
10085 	wm_txrxintr_disable(wmq);
10086 
10087 	mutex_enter(txq->txq_lock);
10088 
10089 	if (txq->txq_stopping) {
10090 		mutex_exit(txq->txq_lock);
10091 		return 1;
10092 	}
10093 
10094 	WM_Q_EVCNT_INCR(txq, txdw);
10095 	txmore = wm_txeof(txq, txlimit);
10096 	/* Deferred start (wm_deferred_start_locked()) is done in wm_handle_queue(). */
10097 	mutex_exit(txq->txq_lock);
10098 
10099 	DPRINTF(sc, WM_DEBUG_RX,
10100 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
10101 	mutex_enter(rxq->rxq_lock);
10102 
10103 	if (rxq->rxq_stopping) {
10104 		mutex_exit(rxq->rxq_lock);
10105 		return 1;
10106 	}
10107 
10108 	WM_Q_EVCNT_INCR(rxq, intr);
10109 	rxmore = wm_rxeof(rxq, rxlimit);
10110 	mutex_exit(rxq->rxq_lock);
10111 
10112 	wm_itrs_writereg(sc, wmq);
10113 
10114 	if (txmore || rxmore) {
10115 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10116 		wm_sched_handle_queue(sc, wmq);
10117 	} else
10118 		wm_txrxintr_enable(wmq);
10119 
10120 	return 1;
10121 }
10122 
10123 static void
10124 wm_handle_queue(void *arg)
10125 {
10126 	struct wm_queue *wmq = arg;
10127 	struct wm_txqueue *txq = &wmq->wmq_txq;
10128 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
10129 	struct wm_softc *sc = txq->txq_sc;
10130 	u_int txlimit = sc->sc_tx_process_limit;
10131 	u_int rxlimit = sc->sc_rx_process_limit;
10132 	bool txmore;
10133 	bool rxmore;
10134 
10135 	mutex_enter(txq->txq_lock);
10136 	if (txq->txq_stopping) {
10137 		mutex_exit(txq->txq_lock);
10138 		return;
10139 	}
10140 	txmore = wm_txeof(txq, txlimit);
10141 	wm_deferred_start_locked(txq);
10142 	mutex_exit(txq->txq_lock);
10143 
10144 	mutex_enter(rxq->rxq_lock);
10145 	if (rxq->rxq_stopping) {
10146 		mutex_exit(rxq->rxq_lock);
10147 		return;
10148 	}
10149 	WM_Q_EVCNT_INCR(rxq, defer);
10150 	rxmore = wm_rxeof(rxq, rxlimit);
10151 	mutex_exit(rxq->rxq_lock);
10152 
10153 	if (txmore || rxmore) {
10154 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10155 		wm_sched_handle_queue(sc, wmq);
10156 	} else
10157 		wm_txrxintr_enable(wmq);
10158 }
10159 
10160 static void
10161 wm_handle_queue_work(struct work *wk, void *context)
10162 {
10163 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10164 
10165 	/*
10166 	 * An "enqueued" flag is not required here.
10167 	 */
10168 	wm_handle_queue(wmq);
10169 }
10170 
10171 /*
10172  * wm_linkintr_msix:
10173  *
10174  *	Interrupt service routine for link status change for MSI-X.
10175  */
10176 static int
10177 wm_linkintr_msix(void *arg)
10178 {
10179 	struct wm_softc *sc = arg;
10180 	uint32_t reg;
10181 	bool has_rxo;
10182 
10183 	reg = CSR_READ(sc, WMREG_ICR);
10184 	WM_CORE_LOCK(sc);
10185 	DPRINTF(sc, WM_DEBUG_LINK,
10186 	    ("%s: LINK: got link intr. ICR = %08x\n",
10187 		device_xname(sc->sc_dev), reg));
10188 
10189 	if (sc->sc_core_stopping)
10190 		goto out;
10191 
10192 	if ((reg & ICR_LSC) != 0) {
10193 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10194 		wm_linkintr(sc, ICR_LSC);
10195 	}
10196 	if ((reg & ICR_GPI(0)) != 0)
10197 		device_printf(sc->sc_dev, "got module interrupt\n");
10198 
10199 	/*
10200 	 * XXX 82574 MSI-X mode workaround
10201 	 *
10202 	 * In 82574 MSI-X mode, a receive overrun (RXO) interrupt arrives on
10203 	 * the ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor
10204 	 * the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
10205 	 * interrupts by writing WMREG_ICS to process received packets.
10206 	 */
10207 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10208 #if defined(WM_DEBUG)
10209 		log(LOG_WARNING, "%s: Receive overrun\n",
10210 		    device_xname(sc->sc_dev));
10211 #endif /* defined(WM_DEBUG) */
10212 
10213 		has_rxo = true;
10214 		/*
10215 		 * The RXO interrupt rate is very high when the receive
10216 		 * traffic rate is high. We use polling mode for ICR_OTHER,
10217 		 * as we do for the Tx/Rx interrupts. ICR_OTHER is re-enabled
10218 		 * at the end of wm_txrxintr_msix(), which is kicked by both
10219 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
10220 		 */
10221 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10222 
10223 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10224 	}
10225 
10228 out:
10229 	WM_CORE_UNLOCK(sc);
10230 
10231 	if (sc->sc_type == WM_T_82574) {
10232 		if (!has_rxo)
10233 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10234 		else
10235 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10236 	} else if (sc->sc_type == WM_T_82575)
10237 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10238 	else
10239 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10240 
10241 	return 1;
10242 }
10243 
10244 /*
10245  * Media related.
10246  * GMII, SGMII, TBI (and SERDES)
10247  */
10248 
10249 /* Common */
10250 
10251 /*
10252  * wm_tbi_serdes_set_linkled:
10253  *
10254  *	Update the link LED on TBI and SERDES devices.
10255  */
10256 static void
10257 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10258 {
10259 
10260 	if (sc->sc_tbi_linkup)
10261 		sc->sc_ctrl |= CTRL_SWDPIN(0);
10262 	else
10263 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10264 
10265 	/* 82540 or newer devices are active low */
10266 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10267 
10268 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10269 }
10270 
10271 /* GMII related */
10272 
10273 /*
10274  * wm_gmii_reset:
10275  *
10276  *	Reset the PHY.
10277  */
10278 static void
10279 wm_gmii_reset(struct wm_softc *sc)
10280 {
10281 	uint32_t reg;
10282 	int rv;
10283 
10284 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10285 		device_xname(sc->sc_dev), __func__));
10286 
10287 	rv = sc->phy.acquire(sc);
10288 	if (rv != 0) {
10289 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10290 		    __func__);
10291 		return;
10292 	}
10293 
10294 	switch (sc->sc_type) {
10295 	case WM_T_82542_2_0:
10296 	case WM_T_82542_2_1:
10297 		/* null */
10298 		break;
10299 	case WM_T_82543:
10300 		/*
10301 		 * With 82543, we need to force speed and duplex on the MAC
10302 		 * equal to what the PHY speed and duplex configuration is.
10303 		 * In addition, we need to perform a hardware reset on the PHY
10304 		 * to take it out of reset.
10305 		 */
10306 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10307 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10308 
10309 		/* The PHY reset pin is active-low. */
10310 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
10311 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10312 		    CTRL_EXT_SWDPIN(4));
10313 		reg |= CTRL_EXT_SWDPIO(4);
10314 
10315 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10316 		CSR_WRITE_FLUSH(sc);
10317 		delay(10*1000);
10318 
10319 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10320 		CSR_WRITE_FLUSH(sc);
10321 		delay(150);
10322 #if 0
10323 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10324 #endif
10325 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
10326 		break;
10327 	case WM_T_82544:	/* Reset 10000us */
10328 	case WM_T_82540:
10329 	case WM_T_82545:
10330 	case WM_T_82545_3:
10331 	case WM_T_82546:
10332 	case WM_T_82546_3:
10333 	case WM_T_82541:
10334 	case WM_T_82541_2:
10335 	case WM_T_82547:
10336 	case WM_T_82547_2:
10337 	case WM_T_82571:	/* Reset 100us */
10338 	case WM_T_82572:
10339 	case WM_T_82573:
10340 	case WM_T_82574:
10341 	case WM_T_82575:
10342 	case WM_T_82576:
10343 	case WM_T_82580:
10344 	case WM_T_I350:
10345 	case WM_T_I354:
10346 	case WM_T_I210:
10347 	case WM_T_I211:
10348 	case WM_T_82583:
10349 	case WM_T_80003:
10350 		/* Generic reset */
10351 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10352 		CSR_WRITE_FLUSH(sc);
10353 		delay(20000);
10354 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10355 		CSR_WRITE_FLUSH(sc);
10356 		delay(20000);
10357 
10358 		if ((sc->sc_type == WM_T_82541)
10359 		    || (sc->sc_type == WM_T_82541_2)
10360 		    || (sc->sc_type == WM_T_82547)
10361 		    || (sc->sc_type == WM_T_82547_2)) {
10362 			/* Workaround for igp are done in igp_reset() */
10363 			/* XXX add code to set LED after phy reset */
10364 		}
10365 		break;
10366 	case WM_T_ICH8:
10367 	case WM_T_ICH9:
10368 	case WM_T_ICH10:
10369 	case WM_T_PCH:
10370 	case WM_T_PCH2:
10371 	case WM_T_PCH_LPT:
10372 	case WM_T_PCH_SPT:
10373 	case WM_T_PCH_CNP:
10374 		/* Generic reset */
10375 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10376 		CSR_WRITE_FLUSH(sc);
10377 		delay(100);
10378 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10379 		CSR_WRITE_FLUSH(sc);
10380 		delay(150);
10381 		break;
10382 	default:
10383 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10384 		    __func__);
10385 		break;
10386 	}
10387 
10388 	sc->phy.release(sc);
10389 
10390 	/* get_cfg_done */
10391 	wm_get_cfg_done(sc);
10392 
10393 	/* Extra setup */
10394 	switch (sc->sc_type) {
10395 	case WM_T_82542_2_0:
10396 	case WM_T_82542_2_1:
10397 	case WM_T_82543:
10398 	case WM_T_82544:
10399 	case WM_T_82540:
10400 	case WM_T_82545:
10401 	case WM_T_82545_3:
10402 	case WM_T_82546:
10403 	case WM_T_82546_3:
10404 	case WM_T_82541_2:
10405 	case WM_T_82547_2:
10406 	case WM_T_82571:
10407 	case WM_T_82572:
10408 	case WM_T_82573:
10409 	case WM_T_82574:
10410 	case WM_T_82583:
10411 	case WM_T_82575:
10412 	case WM_T_82576:
10413 	case WM_T_82580:
10414 	case WM_T_I350:
10415 	case WM_T_I354:
10416 	case WM_T_I210:
10417 	case WM_T_I211:
10418 	case WM_T_80003:
10419 		/* Null */
10420 		break;
10421 	case WM_T_82541:
10422 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
10424 		break;
10425 	case WM_T_ICH8:
10426 	case WM_T_ICH9:
10427 	case WM_T_ICH10:
10428 	case WM_T_PCH:
10429 	case WM_T_PCH2:
10430 	case WM_T_PCH_LPT:
10431 	case WM_T_PCH_SPT:
10432 	case WM_T_PCH_CNP:
10433 		wm_phy_post_reset(sc);
10434 		break;
10435 	default:
10436 		panic("%s: unknown type\n", __func__);
10437 		break;
10438 	}
10439 }
10440 
/*
 * Setup sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and they have to be chosen from the PCI ID or the MAC type
 * without accessing any PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or the MAC type. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still be imperfect because of missing entries in
 * the comparison tables, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
10459 static void
10460 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10461     uint16_t phy_model)
10462 {
10463 	device_t dev = sc->sc_dev;
10464 	struct mii_data *mii = &sc->sc_mii;
10465 	uint16_t new_phytype = WMPHY_UNKNOWN;
10466 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
10467 	mii_readreg_t new_readreg;
10468 	mii_writereg_t new_writereg;
10469 	bool dodiag = true;
10470 
10471 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10472 		device_xname(sc->sc_dev), __func__));
10473 
	/*
	 * 1000BASE-T SFP uses SGMII and the first assumed PHY type is always
	 * incorrect, so don't print diagnostic output on the second call.
	 */
10478 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10479 		dodiag = false;
10480 
10481 	if (mii->mii_readreg == NULL) {
10482 		/*
10483 		 *  This is the first call of this function. For ICH and PCH
10484 		 * variants, it's difficult to determine the PHY access method
10485 		 * by sc_type, so use the PCI product ID for some devices.
10486 		 */
10487 
10488 		switch (sc->sc_pcidevid) {
10489 		case PCI_PRODUCT_INTEL_PCH_M_LM:
10490 		case PCI_PRODUCT_INTEL_PCH_M_LC:
10491 			/* 82577 */
10492 			new_phytype = WMPHY_82577;
10493 			break;
10494 		case PCI_PRODUCT_INTEL_PCH_D_DM:
10495 		case PCI_PRODUCT_INTEL_PCH_D_DC:
10496 			/* 82578 */
10497 			new_phytype = WMPHY_82578;
10498 			break;
10499 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10500 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
10501 			/* 82579 */
10502 			new_phytype = WMPHY_82579;
10503 			break;
10504 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
10505 		case PCI_PRODUCT_INTEL_82801I_BM:
10506 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10507 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10508 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10509 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10510 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10511 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10512 			/* ICH8, 9, 10 with 82567 */
10513 			new_phytype = WMPHY_BM;
10514 			break;
10515 		default:
10516 			break;
10517 		}
10518 	} else {
10519 		/* It's not the first call. Use PHY OUI and model */
10520 		switch (phy_oui) {
10521 		case MII_OUI_ATTANSIC: /* atphy(4) */
10522 			switch (phy_model) {
10523 			case MII_MODEL_ATTANSIC_AR8021:
10524 				new_phytype = WMPHY_82578;
10525 				break;
10526 			default:
10527 				break;
10528 			}
10529 			break;
10530 		case MII_OUI_xxMARVELL:
10531 			switch (phy_model) {
10532 			case MII_MODEL_xxMARVELL_I210:
10533 				new_phytype = WMPHY_I210;
10534 				break;
10535 			case MII_MODEL_xxMARVELL_E1011:
10536 			case MII_MODEL_xxMARVELL_E1000_3:
10537 			case MII_MODEL_xxMARVELL_E1000_5:
10538 			case MII_MODEL_xxMARVELL_E1112:
10539 				new_phytype = WMPHY_M88;
10540 				break;
10541 			case MII_MODEL_xxMARVELL_E1149:
10542 				new_phytype = WMPHY_BM;
10543 				break;
10544 			case MII_MODEL_xxMARVELL_E1111:
10545 			case MII_MODEL_xxMARVELL_I347:
10546 			case MII_MODEL_xxMARVELL_E1512:
10547 			case MII_MODEL_xxMARVELL_E1340M:
10548 			case MII_MODEL_xxMARVELL_E1543:
10549 				new_phytype = WMPHY_M88;
10550 				break;
10551 			case MII_MODEL_xxMARVELL_I82563:
10552 				new_phytype = WMPHY_GG82563;
10553 				break;
10554 			default:
10555 				break;
10556 			}
10557 			break;
10558 		case MII_OUI_INTEL:
10559 			switch (phy_model) {
10560 			case MII_MODEL_INTEL_I82577:
10561 				new_phytype = WMPHY_82577;
10562 				break;
10563 			case MII_MODEL_INTEL_I82579:
10564 				new_phytype = WMPHY_82579;
10565 				break;
10566 			case MII_MODEL_INTEL_I217:
10567 				new_phytype = WMPHY_I217;
10568 				break;
10569 			case MII_MODEL_INTEL_I82580:
10570 				new_phytype = WMPHY_82580;
10571 				break;
10572 			case MII_MODEL_INTEL_I350:
10573 				new_phytype = WMPHY_I350;
				break;
10576 			default:
10577 				break;
10578 			}
10579 			break;
10580 		case MII_OUI_yyINTEL:
10581 			switch (phy_model) {
10582 			case MII_MODEL_yyINTEL_I82562G:
10583 			case MII_MODEL_yyINTEL_I82562EM:
10584 			case MII_MODEL_yyINTEL_I82562ET:
10585 				new_phytype = WMPHY_IFE;
10586 				break;
10587 			case MII_MODEL_yyINTEL_IGP01E1000:
10588 				new_phytype = WMPHY_IGP;
10589 				break;
10590 			case MII_MODEL_yyINTEL_I82566:
10591 				new_phytype = WMPHY_IGP_3;
10592 				break;
10593 			default:
10594 				break;
10595 			}
10596 			break;
10597 		default:
10598 			break;
10599 		}
10600 
10601 		if (dodiag) {
10602 			if (new_phytype == WMPHY_UNKNOWN)
10603 				aprint_verbose_dev(dev,
10604 				    "%s: Unknown PHY model. OUI=%06x, "
10605 				    "model=%04x\n", __func__, phy_oui,
10606 				    phy_model);
10607 
10608 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
10609 			    && (sc->sc_phytype != new_phytype)) {
				aprint_error_dev(dev, "Previously assumed PHY "
				    "type (%u) was incorrect. PHY type from "
				    "PHY ID = %u\n", sc->sc_phytype,
				    new_phytype);
10613 			}
10614 		}
10615 	}
10616 
10617 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10618 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10619 		/* SGMII */
10620 		new_readreg = wm_sgmii_readreg;
10621 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
10623 		/* BM2 (phyaddr == 1) */
10624 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10625 		    && (new_phytype != WMPHY_BM)
10626 		    && (new_phytype != WMPHY_UNKNOWN))
10627 			doubt_phytype = new_phytype;
10628 		new_phytype = WMPHY_BM;
10629 		new_readreg = wm_gmii_bm_readreg;
10630 		new_writereg = wm_gmii_bm_writereg;
10631 	} else if (sc->sc_type >= WM_T_PCH) {
10632 		/* All PCH* use _hv_ */
10633 		new_readreg = wm_gmii_hv_readreg;
10634 		new_writereg = wm_gmii_hv_writereg;
10635 	} else if (sc->sc_type >= WM_T_ICH8) {
10636 		/* non-82567 ICH8, 9 and 10 */
10637 		new_readreg = wm_gmii_i82544_readreg;
10638 		new_writereg = wm_gmii_i82544_writereg;
10639 	} else if (sc->sc_type >= WM_T_80003) {
10640 		/* 80003 */
10641 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10642 		    && (new_phytype != WMPHY_GG82563)
10643 		    && (new_phytype != WMPHY_UNKNOWN))
10644 			doubt_phytype = new_phytype;
10645 		new_phytype = WMPHY_GG82563;
10646 		new_readreg = wm_gmii_i80003_readreg;
10647 		new_writereg = wm_gmii_i80003_writereg;
10648 	} else if (sc->sc_type >= WM_T_I210) {
10649 		/* I210 and I211 */
10650 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10651 		    && (new_phytype != WMPHY_I210)
10652 		    && (new_phytype != WMPHY_UNKNOWN))
10653 			doubt_phytype = new_phytype;
10654 		new_phytype = WMPHY_I210;
10655 		new_readreg = wm_gmii_gs40g_readreg;
10656 		new_writereg = wm_gmii_gs40g_writereg;
10657 	} else if (sc->sc_type >= WM_T_82580) {
10658 		/* 82580, I350 and I354 */
10659 		new_readreg = wm_gmii_82580_readreg;
10660 		new_writereg = wm_gmii_82580_writereg;
10661 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
10663 		new_readreg = wm_gmii_i82544_readreg;
10664 		new_writereg = wm_gmii_i82544_writereg;
10665 	} else {
10666 		new_readreg = wm_gmii_i82543_readreg;
10667 		new_writereg = wm_gmii_i82543_writereg;
10668 	}
10669 
10670 	if (new_phytype == WMPHY_BM) {
10671 		/* All BM use _bm_ */
10672 		new_readreg = wm_gmii_bm_readreg;
10673 		new_writereg = wm_gmii_bm_writereg;
10674 	}
10675 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10676 		/* All PCH* use _hv_ */
10677 		new_readreg = wm_gmii_hv_readreg;
10678 		new_writereg = wm_gmii_hv_writereg;
10679 	}
10680 
10681 	/* Diag output */
10682 	if (dodiag) {
10683 		if (doubt_phytype != WMPHY_UNKNOWN)
10684 			aprint_error_dev(dev, "Assumed new PHY type was "
10685 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10686 			    new_phytype);
		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype))
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. New PHY type = %u\n",
			    sc->sc_phytype, new_phytype);
10692 
10693 		if ((mii->mii_readreg != NULL) &&
10694 		    (new_phytype == WMPHY_UNKNOWN))
10695 			aprint_error_dev(dev, "PHY type is still unknown.\n");
10696 
10697 		if ((mii->mii_readreg != NULL) &&
10698 		    (mii->mii_readreg != new_readreg))
10699 			aprint_error_dev(dev, "Previously assumed PHY "
10700 			    "read/write function was incorrect.\n");
10701 	}
10702 
10703 	/* Update now */
10704 	sc->sc_phytype = new_phytype;
10705 	mii->mii_readreg = new_readreg;
10706 	mii->mii_writereg = new_writereg;
10707 	if (new_readreg == wm_gmii_hv_readreg) {
10708 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10709 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10710 	} else if (new_readreg == wm_sgmii_readreg) {
10711 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10712 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10713 	} else if (new_readreg == wm_gmii_i82544_readreg) {
10714 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10715 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10716 	}
10717 }
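
/*
 * Illustrative sketch (not compiled) of the two-pass flow described above.
 * The first call is assumed to pass zero OUI/model (the PHY ID is not yet
 * known); the second call, as made from wm_gmii_mediainit() below, refines
 * the result once mii_attach() has probed a PHY.  It adds no new logic.
 */
#if 0
static void
wm_setup_phytype_example(struct wm_softc *sc)
{
	struct mii_softc *child;

	/* First pass: guess from the PCI ID or MAC type. */
	wm_gmii_setup_phytype(sc, 0, 0);

	/* (mii_attach() runs at this point and fills sc_mii.mii_phys.) */

	/* Second pass: refine using the probed OUI and model. */
	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child != NULL)
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);
}
#endif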
10718 
10719 /*
10720  * wm_get_phy_id_82575:
10721  *
 *	Return the PHY ID, or -1 on failure.
10723  */
10724 static int
10725 wm_get_phy_id_82575(struct wm_softc *sc)
10726 {
10727 	uint32_t reg;
10728 	int phyid = -1;
10729 
10730 	/* XXX */
10731 	if ((sc->sc_flags & WM_F_SGMII) == 0)
10732 		return -1;
10733 
10734 	if (wm_sgmii_uses_mdio(sc)) {
10735 		switch (sc->sc_type) {
10736 		case WM_T_82575:
10737 		case WM_T_82576:
10738 			reg = CSR_READ(sc, WMREG_MDIC);
10739 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10740 			break;
10741 		case WM_T_82580:
10742 		case WM_T_I350:
10743 		case WM_T_I354:
10744 		case WM_T_I210:
10745 		case WM_T_I211:
10746 			reg = CSR_READ(sc, WMREG_MDICNFG);
10747 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10748 			break;
10749 		default:
10750 			return -1;
10751 		}
10752 	}
10753 
10754 	return phyid;
10755 }
10756 
10757 /*
10758  * wm_gmii_mediainit:
10759  *
10760  *	Initialize media for use on 1000BASE-T devices.
10761  */
10762 static void
10763 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10764 {
10765 	device_t dev = sc->sc_dev;
10766 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10767 	struct mii_data *mii = &sc->sc_mii;
10768 
10769 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10770 		device_xname(sc->sc_dev), __func__));
10771 
10772 	/* We have GMII. */
10773 	sc->sc_flags |= WM_F_HAS_MII;
10774 
10775 	if (sc->sc_type == WM_T_80003)
10776 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
10777 	else
10778 		sc->sc_tipg = TIPG_1000T_DFLT;
10779 
10780 	/*
10781 	 * Let the chip set speed/duplex on its own based on
10782 	 * signals from the PHY.
10783 	 * XXXbouyer - I'm not sure this is right for the 80003,
10784 	 * the em driver only sets CTRL_SLU here - but it seems to work.
10785 	 */
10786 	sc->sc_ctrl |= CTRL_SLU;
10787 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10788 
10789 	/* Initialize our media structures and probe the GMII. */
10790 	mii->mii_ifp = ifp;
10791 
10792 	mii->mii_statchg = wm_gmii_statchg;
10793 
10794 	/* get PHY control from SMBus to PCIe */
10795 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10796 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10797 	    || (sc->sc_type == WM_T_PCH_CNP))
10798 		wm_init_phy_workarounds_pchlan(sc);
10799 
10800 	wm_gmii_reset(sc);
10801 
10802 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10803 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10804 	    wm_gmii_mediastatus, sc->sc_core_lock);
10805 
10806 	/* Setup internal SGMII PHY for SFP */
10807 	wm_sgmii_sfp_preconfig(sc);
10808 
10809 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10810 	    || (sc->sc_type == WM_T_82580)
10811 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10812 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10813 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
10814 			/* Attach only one port */
10815 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10816 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
10817 		} else {
10818 			int i, id;
10819 			uint32_t ctrl_ext;
10820 
10821 			id = wm_get_phy_id_82575(sc);
10822 			if (id != -1) {
10823 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10824 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10825 			}
10826 			if ((id == -1)
10827 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
				CSR_WRITE_FLUSH(sc);
				delay(300*1000); /* XXX too long */
10834 
				/*
				 * Scan PHY addresses 1 through 7.
				 *
				 * I2C access fails with the I2C register's
				 * ERROR bit set, so suppress error messages
				 * while scanning.
				 */
10842 				sc->phy.no_errprint = true;
10843 				for (i = 1; i < 8; i++)
10844 					mii_attach(sc->sc_dev, &sc->sc_mii,
10845 					    0xffffffff, i, MII_OFFSET_ANY,
10846 					    MIIF_DOPAUSE);
10847 				sc->phy.no_errprint = false;
10848 
				/* Restore the previous SFP cage power state */
10850 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10851 			}
10852 		}
10853 	} else
10854 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10855 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10856 
10857 	/*
10858 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
10859 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
10860 	 */
10861 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10862 		|| (sc->sc_type == WM_T_PCH_SPT)
10863 		|| (sc->sc_type == WM_T_PCH_CNP))
10864 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10865 		wm_set_mdio_slow_mode_hv(sc);
10866 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10867 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10868 	}
10869 
10870 	/*
10871 	 * (For ICH8 variants)
10872 	 * If PHY detection failed, use BM's r/w function and retry.
10873 	 */
10874 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10875 		/* if failed, retry with *_bm_* */
10876 		aprint_verbose_dev(dev, "Assumed PHY access function "
10877 		    "(type = %d) might be incorrect. Use BM and retry.\n",
10878 		    sc->sc_phytype);
10879 		sc->sc_phytype = WMPHY_BM;
10880 		mii->mii_readreg = wm_gmii_bm_readreg;
10881 		mii->mii_writereg = wm_gmii_bm_writereg;
10882 
10883 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10884 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10885 	}
10886 
10887 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
10889 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10890 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10891 		sc->sc_phytype = WMPHY_NONE;
10892 	} else {
10893 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10894 
10895 		/*
10896 		 * PHY Found! Check PHY type again by the second call of
10897 		 * wm_gmii_setup_phytype.
10898 		 */
10899 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10900 		    child->mii_mpd_model);
10901 
10902 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10903 	}
10904 }
10905 
10906 /*
10907  * wm_gmii_mediachange:	[ifmedia interface function]
10908  *
10909  *	Set hardware to newly-selected media on a 1000BASE-T device.
10910  */
10911 static int
10912 wm_gmii_mediachange(struct ifnet *ifp)
10913 {
10914 	struct wm_softc *sc = ifp->if_softc;
10915 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10916 	uint32_t reg;
10917 	int rc;
10918 
10919 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10920 		device_xname(sc->sc_dev), __func__));
10921 	if ((ifp->if_flags & IFF_UP) == 0)
10922 		return 0;
10923 
10924 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10925 	if ((sc->sc_type == WM_T_82580)
10926 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10927 	    || (sc->sc_type == WM_T_I211)) {
10928 		reg = CSR_READ(sc, WMREG_PHPM);
10929 		reg &= ~PHPM_GO_LINK_D;
10930 		CSR_WRITE(sc, WMREG_PHPM, reg);
10931 	}
10932 
10933 	/* Disable D0 LPLU. */
10934 	wm_lplu_d0_disable(sc);
10935 
10936 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10937 	sc->sc_ctrl |= CTRL_SLU;
10938 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10939 	    || (sc->sc_type > WM_T_82543)) {
10940 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10941 	} else {
10942 		sc->sc_ctrl &= ~CTRL_ASDE;
10943 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10944 		if (ife->ifm_media & IFM_FDX)
10945 			sc->sc_ctrl |= CTRL_FD;
10946 		switch (IFM_SUBTYPE(ife->ifm_media)) {
10947 		case IFM_10_T:
10948 			sc->sc_ctrl |= CTRL_SPEED_10;
10949 			break;
10950 		case IFM_100_TX:
10951 			sc->sc_ctrl |= CTRL_SPEED_100;
10952 			break;
10953 		case IFM_1000_T:
10954 			sc->sc_ctrl |= CTRL_SPEED_1000;
10955 			break;
10956 		case IFM_NONE:
10957 			/* There is no specific setting for IFM_NONE */
10958 			break;
10959 		default:
10960 			panic("wm_gmii_mediachange: bad media 0x%x",
10961 			    ife->ifm_media);
10962 		}
10963 	}
10964 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10965 	CSR_WRITE_FLUSH(sc);
10966 
10967 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10968 		wm_serdes_mediachange(ifp);
10969 
10970 	if (sc->sc_type <= WM_T_82543)
10971 		wm_gmii_reset(sc);
10972 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10973 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
		/* Allow time for the SFP cage to power up the PHY */
10975 		delay(300 * 1000);
10976 		wm_gmii_reset(sc);
10977 	}
10978 
10979 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10980 		return 0;
10981 	return rc;
10982 }
10983 
10984 /*
10985  * wm_gmii_mediastatus:	[ifmedia interface function]
10986  *
10987  *	Get the current interface media status on a 1000BASE-T device.
10988  */
10989 static void
10990 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10991 {
10992 	struct wm_softc *sc = ifp->if_softc;
10993 
10994 	ether_mediastatus(ifp, ifmr);
10995 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10996 	    | sc->sc_flowflags;
10997 }
10998 
10999 #define	MDI_IO		CTRL_SWDPIN(2)
11000 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
11001 #define	MDI_CLK		CTRL_SWDPIN(3)
11002 
11003 static void
11004 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
11005 {
11006 	uint32_t i, v;
11007 
11008 	v = CSR_READ(sc, WMREG_CTRL);
11009 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11010 	v |= MDI_DIR | CTRL_SWDPIO(3);
11011 
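	/* For each bit: drive MDIO, then clock MDC high and low, 10us apart */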
11012 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
11013 		if (data & i)
11014 			v |= MDI_IO;
11015 		else
11016 			v &= ~MDI_IO;
11017 		CSR_WRITE(sc, WMREG_CTRL, v);
11018 		CSR_WRITE_FLUSH(sc);
11019 		delay(10);
11020 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11021 		CSR_WRITE_FLUSH(sc);
11022 		delay(10);
11023 		CSR_WRITE(sc, WMREG_CTRL, v);
11024 		CSR_WRITE_FLUSH(sc);
11025 		delay(10);
11026 	}
11027 }
11028 
11029 static uint16_t
11030 wm_i82543_mii_recvbits(struct wm_softc *sc)
11031 {
11032 	uint32_t v, i;
11033 	uint16_t data = 0;
11034 
11035 	v = CSR_READ(sc, WMREG_CTRL);
11036 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
11037 	v |= CTRL_SWDPIO(3);
11038 
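	/* Clock one turnaround cycle before sampling the 16 data bits */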
11039 	CSR_WRITE(sc, WMREG_CTRL, v);
11040 	CSR_WRITE_FLUSH(sc);
11041 	delay(10);
11042 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11043 	CSR_WRITE_FLUSH(sc);
11044 	delay(10);
11045 	CSR_WRITE(sc, WMREG_CTRL, v);
11046 	CSR_WRITE_FLUSH(sc);
11047 	delay(10);
11048 
11049 	for (i = 0; i < 16; i++) {
11050 		data <<= 1;
11051 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11052 		CSR_WRITE_FLUSH(sc);
11053 		delay(10);
11054 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
11055 			data |= 1;
11056 		CSR_WRITE(sc, WMREG_CTRL, v);
11057 		CSR_WRITE_FLUSH(sc);
11058 		delay(10);
11059 	}
11060 
11061 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
11062 	CSR_WRITE_FLUSH(sc);
11063 	delay(10);
11064 	CSR_WRITE(sc, WMREG_CTRL, v);
11065 	CSR_WRITE_FLUSH(sc);
11066 	delay(10);
11067 
11068 	return data;
11069 }
11070 
11071 #undef MDI_IO
11072 #undef MDI_DIR
11073 #undef MDI_CLK
11074 
11075 /*
11076  * wm_gmii_i82543_readreg:	[mii interface function]
11077  *
11078  *	Read a PHY register on the GMII (i82543 version).
11079  */
11080 static int
11081 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
11082 {
11083 	struct wm_softc *sc = device_private(dev);
11084 
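	/*
	 * IEEE 802.3 clause 22 read: a 32-bit preamble of ones, then a
	 * 14-bit command of ST(01), OP(10 = read), 5-bit PHYAD and 5-bit
	 * REGAD; the turnaround and 16 data bits are clocked back in by
	 * wm_i82543_mii_recvbits().
	 */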
11085 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11086 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
11087 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
11088 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
11089 
11090 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
11091 		device_xname(dev), phy, reg, *val));
11092 
11093 	return 0;
11094 }
11095 
11096 /*
11097  * wm_gmii_i82543_writereg:	[mii interface function]
11098  *
11099  *	Write a PHY register on the GMII (i82543 version).
11100  */
11101 static int
11102 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
11103 {
11104 	struct wm_softc *sc = device_private(dev);
11105 
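	/*
	 * IEEE 802.3 clause 22 write: a 32-bit preamble of ones, then a
	 * 32-bit frame of ST(01), OP(01 = write), 5-bit PHYAD, 5-bit
	 * REGAD, TA(10) and 16 data bits.
	 */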
11106 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
11107 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
11108 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
11109 	    (MII_COMMAND_START << 30), 32);
11110 
11111 	return 0;
11112 }
11113 
11114 /*
11115  * wm_gmii_mdic_readreg:	[mii interface function]
11116  *
11117  *	Read a PHY register on the GMII.
11118  */
11119 static int
11120 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
11121 {
11122 	struct wm_softc *sc = device_private(dev);
11123 	uint32_t mdic = 0;
11124 	int i;
11125 
11126 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11127 	    && (reg > MII_ADDRMASK)) {
11128 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11129 		    __func__, sc->sc_phytype, reg);
11130 		reg &= MII_ADDRMASK;
11131 	}
11132 
11133 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
11134 	    MDIC_REGADD(reg));
11135 
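	/* Poll the ready bit; each iteration waits 50us */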
11136 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11137 		delay(50);
11138 		mdic = CSR_READ(sc, WMREG_MDIC);
11139 		if (mdic & MDIC_READY)
11140 			break;
11141 	}
11142 
11143 	if ((mdic & MDIC_READY) == 0) {
11144 		DPRINTF(sc, WM_DEBUG_GMII,
11145 		    ("%s: MDIC read timed out: phy %d reg %d\n",
11146 			device_xname(dev), phy, reg));
11147 		return ETIMEDOUT;
	} else if (mdic & MDIC_E) {
		/* This is normal if no PHY is present. */
		DPRINTF(sc, WM_DEBUG_GMII,
		    ("%s: MDIC read error: phy %d reg %d\n",
			device_xname(dev), phy, reg));
11152 		return -1;
11153 	} else
11154 		*val = MDIC_DATA(mdic);
11155 
11156 	/*
11157 	 * Allow some time after each MDIC transaction to avoid
11158 	 * reading duplicate data in the next MDIC transaction.
11159 	 */
11160 	if (sc->sc_type == WM_T_PCH2)
11161 		delay(100);
11162 
11163 	return 0;
11164 }
11165 
11166 /*
11167  * wm_gmii_mdic_writereg:	[mii interface function]
11168  *
11169  *	Write a PHY register on the GMII.
11170  */
11171 static int
11172 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11173 {
11174 	struct wm_softc *sc = device_private(dev);
11175 	uint32_t mdic = 0;
11176 	int i;
11177 
11178 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11179 	    && (reg > MII_ADDRMASK)) {
11180 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11181 		    __func__, sc->sc_phytype, reg);
11182 		reg &= MII_ADDRMASK;
11183 	}
11184 
11185 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11186 	    MDIC_REGADD(reg) | MDIC_DATA(val));
11187 
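	/* Poll the ready bit; each iteration waits 50us */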
11188 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11189 		delay(50);
11190 		mdic = CSR_READ(sc, WMREG_MDIC);
11191 		if (mdic & MDIC_READY)
11192 			break;
11193 	}
11194 
11195 	if ((mdic & MDIC_READY) == 0) {
11196 		DPRINTF(sc, WM_DEBUG_GMII,
11197 		    ("%s: MDIC write timed out: phy %d reg %d\n",
11198 			device_xname(dev), phy, reg));
11199 		return ETIMEDOUT;
11200 	} else if (mdic & MDIC_E) {
11201 		DPRINTF(sc, WM_DEBUG_GMII,
11202 		    ("%s: MDIC write error: phy %d reg %d\n",
11203 			device_xname(dev), phy, reg));
11204 		return -1;
11205 	}
11206 
11207 	/*
11208 	 * Allow some time after each MDIC transaction to avoid
11209 	 * reading duplicate data in the next MDIC transaction.
11210 	 */
11211 	if (sc->sc_type == WM_T_PCH2)
11212 		delay(100);
11213 
11214 	return 0;
11215 }
11216 
11217 /*
11218  * wm_gmii_i82544_readreg:	[mii interface function]
11219  *
11220  *	Read a PHY register on the GMII.
11221  */
11222 static int
11223 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11224 {
11225 	struct wm_softc *sc = device_private(dev);
11226 	int rv;
11227 
11228 	if (sc->phy.acquire(sc)) {
11229 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11230 		return -1;
11231 	}
11232 
11233 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11234 
11235 	sc->phy.release(sc);
11236 
11237 	return rv;
11238 }
11239 
11240 static int
11241 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11242 {
11243 	struct wm_softc *sc = device_private(dev);
11244 	int rv;
11245 
11246 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11247 		switch (sc->sc_phytype) {
11248 		case WMPHY_IGP:
11249 		case WMPHY_IGP_2:
11250 		case WMPHY_IGP_3:
11251 			rv = wm_gmii_mdic_writereg(dev, phy,
11252 			    IGPHY_PAGE_SELECT, reg);
11253 			if (rv != 0)
11254 				return rv;
11255 			break;
11256 		default:
11257 #ifdef WM_DEBUG
11258 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11259 			    __func__, sc->sc_phytype, reg);
11260 #endif
11261 			break;
11262 		}
11263 	}
11264 
11265 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11266 }
11267 
11268 /*
11269  * wm_gmii_i82544_writereg:	[mii interface function]
11270  *
11271  *	Write a PHY register on the GMII.
11272  */
11273 static int
11274 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11275 {
11276 	struct wm_softc *sc = device_private(dev);
11277 	int rv;
11278 
11279 	if (sc->phy.acquire(sc)) {
11280 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11281 		return -1;
11282 	}
11283 
	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
11285 	sc->phy.release(sc);
11286 
11287 	return rv;
11288 }
11289 
11290 static int
11291 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11292 {
11293 	struct wm_softc *sc = device_private(dev);
11294 	int rv;
11295 
11296 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11297 		switch (sc->sc_phytype) {
11298 		case WMPHY_IGP:
11299 		case WMPHY_IGP_2:
11300 		case WMPHY_IGP_3:
11301 			rv = wm_gmii_mdic_writereg(dev, phy,
11302 			    IGPHY_PAGE_SELECT, reg);
11303 			if (rv != 0)
11304 				return rv;
11305 			break;
11306 		default:
11307 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
11310 #endif
11311 			break;
11312 		}
11313 	}
11314 
11315 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11316 }
11317 
11318 /*
11319  * wm_gmii_i80003_readreg:	[mii interface function]
11320  *
11321  *	Read a PHY register on the kumeran
11322  * This could be handled by the PHY layer if we didn't have to lock the
11323  * resource ...
11324  */
11325 static int
11326 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11327 {
11328 	struct wm_softc *sc = device_private(dev);
11329 	int page_select;
11330 	uint16_t temp, temp2;
11331 	int rv = 0;
11332 
11333 	if (phy != 1) /* Only one PHY on kumeran bus */
11334 		return -1;
11335 
11336 	if (sc->phy.acquire(sc)) {
11337 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11338 		return -1;
11339 	}
11340 
11341 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11342 		page_select = GG82563_PHY_PAGE_SELECT;
11343 	else {
11344 		/*
11345 		 * Use Alternative Page Select register to access registers
11346 		 * 30 and 31.
11347 		 */
11348 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11349 	}
11350 	temp = reg >> GG82563_PAGE_SHIFT;
11351 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11352 		goto out;
11353 
11354 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
11359 		delay(200);
11360 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11361 		if ((rv != 0) || (temp2 != temp)) {
11362 			device_printf(dev, "%s failed\n", __func__);
11363 			rv = -1;
11364 			goto out;
11365 		}
11366 		delay(200);
11367 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11368 		delay(200);
11369 	} else
11370 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11371 
11372 out:
11373 	sc->phy.release(sc);
11374 	return rv;
11375 }
11376 
11377 /*
11378  * wm_gmii_i80003_writereg:	[mii interface function]
11379  *
11380  *	Write a PHY register on the kumeran.
11381  * This could be handled by the PHY layer if we didn't have to lock the
11382  * resource ...
11383  */
11384 static int
11385 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11386 {
11387 	struct wm_softc *sc = device_private(dev);
11388 	int page_select, rv;
11389 	uint16_t temp, temp2;
11390 
11391 	if (phy != 1) /* Only one PHY on kumeran bus */
11392 		return -1;
11393 
11394 	if (sc->phy.acquire(sc)) {
11395 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11396 		return -1;
11397 	}
11398 
11399 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11400 		page_select = GG82563_PHY_PAGE_SELECT;
11401 	else {
11402 		/*
11403 		 * Use Alternative Page Select register to access registers
11404 		 * 30 and 31.
11405 		 */
11406 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11407 	}
11408 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11409 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11410 		goto out;
11411 
11412 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
11417 		delay(200);
11418 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11419 		if ((rv != 0) || (temp2 != temp)) {
11420 			device_printf(dev, "%s failed\n", __func__);
11421 			rv = -1;
11422 			goto out;
11423 		}
11424 		delay(200);
11425 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11426 		delay(200);
11427 	} else
11428 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11429 
11430 out:
11431 	sc->phy.release(sc);
11432 	return rv;
11433 }
11434 
11435 /*
11436  * wm_gmii_bm_readreg:	[mii interface function]
11437  *
 *	Read a PHY register on the BM PHY (82567 and 82574/82583).
11439  * This could be handled by the PHY layer if we didn't have to lock the
11440  * resource ...
11441  */
11442 static int
11443 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11444 {
11445 	struct wm_softc *sc = device_private(dev);
11446 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11447 	int rv;
11448 
11449 	if (sc->phy.acquire(sc)) {
11450 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11451 		return -1;
11452 	}
11453 
11454 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11455 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11456 		    || (reg == 31)) ? 1 : phy;
11457 	/* Page 800 works differently than the rest so it has its own func */
11458 	if (page == BM_WUC_PAGE) {
11459 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11460 		goto release;
11461 	}
11462 
11463 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11464 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11465 		    && (sc->sc_type != WM_T_82583))
11466 			rv = wm_gmii_mdic_writereg(dev, phy,
11467 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11468 		else
11469 			rv = wm_gmii_mdic_writereg(dev, phy,
11470 			    BME1000_PHY_PAGE_SELECT, page);
11471 		if (rv != 0)
11472 			goto release;
11473 	}
11474 
11475 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11476 
11477 release:
11478 	sc->phy.release(sc);
11479 	return rv;
11480 }
11481 
11482 /*
11483  * wm_gmii_bm_writereg:	[mii interface function]
11484  *
 *	Write a PHY register on the BM PHY (82567 and 82574/82583).
11486  * This could be handled by the PHY layer if we didn't have to lock the
11487  * resource ...
11488  */
11489 static int
11490 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11491 {
11492 	struct wm_softc *sc = device_private(dev);
11493 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11494 	int rv;
11495 
11496 	if (sc->phy.acquire(sc)) {
11497 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11498 		return -1;
11499 	}
11500 
11501 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11502 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11503 		    || (reg == 31)) ? 1 : phy;
11504 	/* Page 800 works differently than the rest so it has its own func */
11505 	if (page == BM_WUC_PAGE) {
11506 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11507 		goto release;
11508 	}
11509 
11510 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11511 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11512 		    && (sc->sc_type != WM_T_82583))
11513 			rv = wm_gmii_mdic_writereg(dev, phy,
11514 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11515 		else
11516 			rv = wm_gmii_mdic_writereg(dev, phy,
11517 			    BME1000_PHY_PAGE_SELECT, page);
11518 		if (rv != 0)
11519 			goto release;
11520 	}
11521 
11522 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11523 
11524 release:
11525 	sc->phy.release(sc);
11526 	return rv;
11527 }
11528 
11529 /*
11530  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11531  *  @dev: pointer to the HW structure
11532  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11533  *
11534  *  Assumes semaphore already acquired and phy_reg points to a valid memory
11535  *  address to store contents of the BM_WUC_ENABLE_REG register.
11536  */
11537 static int
11538 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11539 {
11540 #ifdef WM_DEBUG
11541 	struct wm_softc *sc = device_private(dev);
11542 #endif
11543 	uint16_t temp;
11544 	int rv;
11545 
11546 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11547 		device_xname(dev), __func__));
11548 
11549 	if (!phy_regp)
11550 		return -1;
11551 
11552 	/* All page select, port ctrl and wakeup registers use phy address 1 */
11553 
11554 	/* Select Port Control Registers page */
11555 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11556 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11557 	if (rv != 0)
11558 		return rv;
11559 
11560 	/* Read WUCE and save it */
11561 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11562 	if (rv != 0)
11563 		return rv;
11564 
	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
11568 	temp = *phy_regp;
11569 	temp |= BM_WUC_ENABLE_BIT;
11570 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11571 
11572 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11573 		return rv;
11574 
	/*
	 * Select the Host Wakeup Registers page; the caller is now able to
	 * write registers on the Wakeup registers page.
	 */
11578 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11579 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11580 }
11581 
11582 /*
11583  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11584  *  @dev: pointer to the HW structure
11585  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
11586  *
11587  *  Restore BM_WUC_ENABLE_REG to its original value.
11588  *
11589  *  Assumes semaphore already acquired and *phy_reg is the contents of the
11590  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11591  *  caller.
11592  */
11593 static int
11594 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11595 {
11596 #ifdef WM_DEBUG
11597 	struct wm_softc *sc = device_private(dev);
11598 #endif
11599 
11600 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11601 		device_xname(dev), __func__));
11602 
11603 	if (!phy_regp)
11604 		return -1;
11605 
11606 	/* Select Port Control Registers page */
11607 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11608 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11609 
11610 	/* Restore 769.17 to its original value */
11611 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11612 
11613 	return 0;
11614 }
11615 
11616 /*
11617  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11618  *  @sc: pointer to the HW structure
11619  *  @offset: register offset to be read or written
11620  *  @val: pointer to the data to read or write
11621  *  @rd: determines if operation is read or write
11622  *  @page_set: BM_WUC_PAGE already set and access enabled
11623  *
11624  *  Read the PHY register at offset and store the retrieved information in
11625  *  data, or write data to PHY register at offset.  Note the procedure to
11626  *  access the PHY wakeup registers is different than reading the other PHY
11627  *  registers. It works as such:
11628  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for host (801 for manageability)
11630  *  3) Write the address using the address opcode (0x11)
11631  *  4) Read or write the data using the data opcode (0x12)
11632  *  5) Restore 769.17.2 to its original value
11633  *
11634  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11635  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11636  *
11637  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for the wm_[enable|disable]_phy_wakeup_reg_access_bm()
 *  calls).
11640  */
11641 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
11643 	bool page_set)
11644 {
11645 	struct wm_softc *sc = device_private(dev);
11646 	uint16_t regnum = BM_PHY_REG_NUM(offset);
11647 	uint16_t page = BM_PHY_REG_PAGE(offset);
11648 	uint16_t wuce;
11649 	int rv = 0;
11650 
11651 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11652 		device_xname(dev), __func__));
11653 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
11654 	if ((sc->sc_type == WM_T_PCH)
11655 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11656 		device_printf(dev,
11657 		    "Attempting to access page %d while gig enabled.\n", page);
11658 	}
11659 
11660 	if (!page_set) {
11661 		/* Enable access to PHY wakeup registers */
11662 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11663 		if (rv != 0) {
11664 			device_printf(dev,
11665 			    "%s: Could not enable PHY wakeup reg access\n",
11666 			    __func__);
11667 			return rv;
11668 		}
11669 	}
11670 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11671 		device_xname(sc->sc_dev), __func__, page, regnum));
11672 
	/*
	 * Steps 3 and 4: access the PHY wakeup register (see the numbered
	 * procedure in the function comment above).
	 */
11677 
11678 	/* Write the Wakeup register page offset value using opcode 0x11 */
11679 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11680 	if (rv != 0)
11681 		return rv;
11682 
11683 	if (rd) {
11684 		/* Read the Wakeup register page value using opcode 0x12 */
11685 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11686 	} else {
11687 		/* Write the Wakeup register page value using opcode 0x12 */
11688 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11689 	}
11690 	if (rv != 0)
11691 		return rv;
11692 
11693 	if (!page_set)
11694 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11695 
11696 	return rv;
11697 }
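
/*
 * Illustrative sketch (not compiled): with page_set == false, the function
 * above brackets a single access with the enable/disable helpers itself.
 * "offset" is assumed to encode BM_WUC_PAGE in its BM_PHY_REG_PAGE() field,
 * and the PHY semaphore must already be held by the caller.
 */
#if 0
static int
wm_wuc_read_example(device_t dev, int offset, uint16_t *valp)
{

	return wm_access_phy_wakeup_reg_bm(dev, offset, valp, true, false);
}
#endif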
11698 
11699 /*
11700  * wm_gmii_hv_readreg:	[mii interface function]
11701  *
 *	Read a PHY register on the HV PHY (PCH and newer).
11703  * This could be handled by the PHY layer if we didn't have to lock the
11704  * resource ...
11705  */
11706 static int
11707 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11708 {
11709 	struct wm_softc *sc = device_private(dev);
11710 	int rv;
11711 
11712 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11713 		device_xname(dev), __func__));
11714 	if (sc->phy.acquire(sc)) {
11715 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11716 		return -1;
11717 	}
11718 
11719 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11720 	sc->phy.release(sc);
11721 	return rv;
11722 }
11723 
11724 static int
11725 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11726 {
11727 	uint16_t page = BM_PHY_REG_PAGE(reg);
11728 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11729 	int rv;
11730 
11731 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11732 
11733 	/* Page 800 works differently than the rest so it has its own func */
11734 	if (page == BM_WUC_PAGE)
11735 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11736 
	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; such accesses are not handled here.
	 */
11741 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11742 		device_printf(dev, "gmii_hv_readreg!!!\n");
11743 		return -1;
11744 	}
11745 
11746 	/*
11747 	 * XXX I21[789] documents say that the SMBus Address register is at
11748 	 * PHY address 01, Page 0 (not 768), Register 26.
11749 	 */
11750 	if (page == HV_INTC_FC_PAGE_START)
11751 		page = 0;
11752 
11753 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11754 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11755 		    page << BME1000_PAGE_SHIFT);
11756 		if (rv != 0)
11757 			return rv;
11758 	}
11759 
11760 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11761 }
11762 
11763 /*
11764  * wm_gmii_hv_writereg:	[mii interface function]
 *	Write a PHY register on the HV PHY (PCH and newer).
11766  *	Write a PHY register on the kumeran.
11767  * This could be handled by the PHY layer if we didn't have to lock the
11768  * resource ...
11769  */
11770 static int
11771 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11772 {
11773 	struct wm_softc *sc = device_private(dev);
11774 	int rv;
11775 
11776 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11777 		device_xname(dev), __func__));
11778 
11779 	if (sc->phy.acquire(sc)) {
11780 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11781 		return -1;
11782 	}
11783 
11784 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11785 	sc->phy.release(sc);
11786 
11787 	return rv;
11788 }
11789 
11790 static int
11791 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11792 {
11793 	struct wm_softc *sc = device_private(dev);
11794 	uint16_t page = BM_PHY_REG_PAGE(reg);
11795 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11796 	int rv;
11797 
11798 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11799 
11800 	/* Page 800 works differently than the rest so it has its own func */
11801 	if (page == BM_WUC_PAGE)
11802 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11803 		    false);
11804 
	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; such accesses are not handled here.
	 */
11809 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11810 		device_printf(dev, "gmii_hv_writereg!!!\n");
11811 		return -1;
11812 	}
11813 
11814 	{
11815 		/*
11816 		 * XXX I21[789] documents say that the SMBus Address register
11817 		 * is at PHY address 01, Page 0 (not 768), Register 26.
11818 		 */
11819 		if (page == HV_INTC_FC_PAGE_START)
11820 			page = 0;
11821 
11822 		/*
11823 		 * XXX Workaround MDIO accesses being disabled after entering
11824 		 * IEEE Power Down (whenever bit 11 of the PHY control
11825 		 * register is set)
11826 		 */
11827 		if (sc->sc_phytype == WMPHY_82578) {
11828 			struct mii_softc *child;
11829 
11830 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11831 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
11832 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11833 			    && ((val & (1 << 11)) != 0)) {
11834 				device_printf(dev, "XXX need workaround\n");
11835 			}
11836 		}
11837 
11838 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11839 			rv = wm_gmii_mdic_writereg(dev, 1,
11840 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11841 			if (rv != 0)
11842 				return rv;
11843 		}
11844 	}
11845 
11846 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11847 }
11848 
11849 /*
11850  * wm_gmii_82580_readreg:	[mii interface function]
11851  *
11852  *	Read a PHY register on the 82580 and I350.
11853  * This could be handled by the PHY layer if we didn't have to lock the
11854  * resource ...
11855  */
11856 static int
11857 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11858 {
11859 	struct wm_softc *sc = device_private(dev);
11860 	int rv;
11861 
11862 	if (sc->phy.acquire(sc) != 0) {
11863 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11864 		return -1;
11865 	}
11866 
11867 #ifdef DIAGNOSTIC
11868 	if (reg > MII_ADDRMASK) {
11869 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11870 		    __func__, sc->sc_phytype, reg);
11871 		reg &= MII_ADDRMASK;
11872 	}
11873 #endif
11874 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11875 
11876 	sc->phy.release(sc);
11877 	return rv;
11878 }
11879 
11880 /*
11881  * wm_gmii_82580_writereg:	[mii interface function]
11882  *
11883  *	Write a PHY register on the 82580 and I350.
11884  * This could be handled by the PHY layer if we didn't have to lock the
11885  * resource ...
11886  */
11887 static int
11888 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11889 {
11890 	struct wm_softc *sc = device_private(dev);
11891 	int rv;
11892 
11893 	if (sc->phy.acquire(sc) != 0) {
11894 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11895 		return -1;
11896 	}
11897 
11898 #ifdef DIAGNOSTIC
11899 	if (reg > MII_ADDRMASK) {
11900 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11901 		    __func__, sc->sc_phytype, reg);
11902 		reg &= MII_ADDRMASK;
11903 	}
11904 #endif
11905 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11906 
11907 	sc->phy.release(sc);
11908 	return rv;
11909 }
11910 
11911 /*
11912  * wm_gmii_gs40g_readreg:	[mii interface function]
11913  *
 *	Read a PHY register on the I210 and I211.
11915  * This could be handled by the PHY layer if we didn't have to lock the
11916  * resource ...
11917  */
11918 static int
11919 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11920 {
11921 	struct wm_softc *sc = device_private(dev);
11922 	int page, offset;
11923 	int rv;
11924 
11925 	/* Acquire semaphore */
11926 	if (sc->phy.acquire(sc)) {
11927 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11928 		return -1;
11929 	}
11930 
11931 	/* Page select */
11932 	page = reg >> GS40G_PAGE_SHIFT;
11933 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11934 	if (rv != 0)
11935 		goto release;
11936 
11937 	/* Read reg */
11938 	offset = reg & GS40G_OFFSET_MASK;
11939 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11940 
11941 release:
11942 	sc->phy.release(sc);
11943 	return rv;
11944 }
11945 
11946 /*
11947  * wm_gmii_gs40g_writereg:	[mii interface function]
11948  *
11949  *	Write a PHY register on the I210 and I211.
11950  * This could be handled by the PHY layer if we didn't have to lock the
11951  * resource ...
11952  */
11953 static int
11954 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11955 {
11956 	struct wm_softc *sc = device_private(dev);
11957 	uint16_t page;
11958 	int offset, rv;
11959 
11960 	/* Acquire semaphore */
11961 	if (sc->phy.acquire(sc)) {
11962 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11963 		return -1;
11964 	}
11965 
11966 	/* Page select */
11967 	page = reg >> GS40G_PAGE_SHIFT;
11968 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11969 	if (rv != 0)
11970 		goto release;
11971 
11972 	/* Write reg */
11973 	offset = reg & GS40G_OFFSET_MASK;
11974 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11975 
11976 release:
11977 	/* Release semaphore */
11978 	sc->phy.release(sc);
11979 	return rv;
11980 }
11981 
11982 /*
11983  * wm_gmii_statchg:	[mii interface function]
11984  *
11985  *	Callback from MII layer when media changes.
11986  */
11987 static void
11988 wm_gmii_statchg(struct ifnet *ifp)
11989 {
11990 	struct wm_softc *sc = ifp->if_softc;
11991 	struct mii_data *mii = &sc->sc_mii;
11992 
11993 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11994 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11995 	sc->sc_fcrtl &= ~FCRTL_XONE;
11996 
11997 	/* Get flow control negotiation result. */
11998 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11999 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
12000 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
12001 		mii->mii_media_active &= ~IFM_ETH_FMASK;
12002 	}
12003 
12004 	if (sc->sc_flowflags & IFM_FLOW) {
12005 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
12006 			sc->sc_ctrl |= CTRL_TFCE;
12007 			sc->sc_fcrtl |= FCRTL_XONE;
12008 		}
12009 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
12010 			sc->sc_ctrl |= CTRL_RFCE;
12011 	}
12012 
12013 	if (mii->mii_media_active & IFM_FDX) {
12014 		DPRINTF(sc, WM_DEBUG_LINK,
12015 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
12016 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12017 	} else {
12018 		DPRINTF(sc, WM_DEBUG_LINK,
12019 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
12020 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12021 	}
12022 
12023 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12024 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12025 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
12026 						 : WMREG_FCRTL, sc->sc_fcrtl);
12027 	if (sc->sc_type == WM_T_80003) {
12028 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
12029 		case IFM_1000_T:
12030 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12031 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
12032 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
12033 			break;
12034 		default:
12035 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
12036 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
12037 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
12038 			break;
12039 		}
12040 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
12041 	}
12042 }
12043 
12044 /* kumeran related (80003, ICH* and PCH*) */
12045 
12046 /*
12047  * wm_kmrn_readreg:
12048  *
12049  *	Read a kumeran register
12050  */
12051 static int
12052 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
12053 {
12054 	int rv;
12055 
12056 	if (sc->sc_type == WM_T_80003)
12057 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12058 	else
12059 		rv = sc->phy.acquire(sc);
12060 	if (rv != 0) {
12061 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12062 		    __func__);
12063 		return rv;
12064 	}
12065 
12066 	rv = wm_kmrn_readreg_locked(sc, reg, val);
12067 
12068 	if (sc->sc_type == WM_T_80003)
12069 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12070 	else
12071 		sc->phy.release(sc);
12072 
12073 	return rv;
12074 }
12075 
12076 static int
12077 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
12078 {
12079 
12080 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12081 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
12082 	    KUMCTRLSTA_REN);
12083 	CSR_WRITE_FLUSH(sc);
12084 	delay(2);
12085 
12086 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
12087 
12088 	return 0;
12089 }
12090 
12091 /*
12092  * wm_kmrn_writereg:
12093  *
12094  *	Write a kumeran register
12095  */
12096 static int
12097 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
12098 {
12099 	int rv;
12100 
12101 	if (sc->sc_type == WM_T_80003)
12102 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12103 	else
12104 		rv = sc->phy.acquire(sc);
12105 	if (rv != 0) {
12106 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
12107 		    __func__);
12108 		return rv;
12109 	}
12110 
12111 	rv = wm_kmrn_writereg_locked(sc, reg, val);
12112 
12113 	if (sc->sc_type == WM_T_80003)
12114 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
12115 	else
12116 		sc->phy.release(sc);
12117 
12118 	return rv;
12119 }
12120 
12121 static int
12122 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
12123 {
12124 
12125 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
12126 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
12127 
12128 	return 0;
12129 }
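
/*
 * Illustrative sketch (not compiled): a read/write pair through the
 * helpers above.  The offset and value are the ones wm_gmii_statchg()
 * above uses; they are repeated here only to make the pairing explicit.
 */
#if 0
static void
wm_kmrn_example(struct wm_softc *sc)
{
	uint16_t kval;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &kval) == 0)
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
		    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
}
#endif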
12130 
12131 /*
 * EMI register related (82579, WMPHY_I217 (PCH2 and newer))
12133  * This access method is different from IEEE MMD.
12134  */
12135 static int
12136 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
12137 {
12138 	struct wm_softc *sc = device_private(dev);
12139 	int rv;
12140 
12141 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
12142 	if (rv != 0)
12143 		return rv;
12144 
12145 	if (rd)
12146 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
12147 	else
12148 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
12149 	return rv;
12150 }
12151 
12152 static int
12153 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12154 {
12155 
12156 	return wm_access_emi_reg_locked(dev, reg, val, true);
12157 }
12158 
12159 static int
12160 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12161 {
12162 
12163 	return wm_access_emi_reg_locked(dev, reg, &val, false);
12164 }
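
/*
 * Illustrative sketch (not compiled): the address/data indirection the EMI
 * helpers above implement.  "reg" is a placeholder EMI offset; as with all
 * *_locked functions here, the PHY semaphore must already be held.
 */
#if 0
static int
wm_emi_rmw_example(device_t dev, int reg, uint16_t setbits)
{
	uint16_t val;
	int rv;

	if ((rv = wm_read_emi_reg_locked(dev, reg, &val)) != 0)
		return rv;
	return wm_write_emi_reg_locked(dev, reg, val | setbits);
}
#endif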
12165 
12166 /* SGMII related */
12167 
12168 /*
 * wm_sgmii_uses_mdio:
12170  *
12171  * Check whether the transaction is to the internal PHY or the external
12172  * MDIO interface. Return true if it's MDIO.
12173  */
12174 static bool
12175 wm_sgmii_uses_mdio(struct wm_softc *sc)
12176 {
12177 	uint32_t reg;
12178 	bool ismdio = false;
12179 
12180 	switch (sc->sc_type) {
12181 	case WM_T_82575:
12182 	case WM_T_82576:
12183 		reg = CSR_READ(sc, WMREG_MDIC);
12184 		ismdio = ((reg & MDIC_DEST) != 0);
12185 		break;
12186 	case WM_T_82580:
12187 	case WM_T_I350:
12188 	case WM_T_I354:
12189 	case WM_T_I210:
12190 	case WM_T_I211:
12191 		reg = CSR_READ(sc, WMREG_MDICNFG);
12192 		ismdio = ((reg & MDICNFG_DEST) != 0);
12193 		break;
12194 	default:
12195 		break;
12196 	}
12197 
12198 	return ismdio;
12199 }
12200 
12201 /* Setup internal SGMII PHY for SFP */
12202 static void
12203 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12204 {
12205 	uint16_t id1, id2, phyreg;
12206 	int i, rv;
12207 
12208 	if (((sc->sc_flags & WM_F_SGMII) == 0)
12209 	    || ((sc->sc_flags & WM_F_SFP) == 0))
12210 		return;
12211 
12212 	for (i = 0; i < MII_NPHY; i++) {
12213 		sc->phy.no_errprint = true;
12214 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12215 		if (rv != 0)
12216 			continue;
12217 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12218 		if (rv != 0)
12219 			continue;
12220 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12221 			continue;
12222 		sc->phy.no_errprint = false;
12223 
12224 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12225 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12226 		phyreg |= ESSR_SGMII_WOC_COPPER;
12227 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12228 		break;
12229 	}
12230 
12231 }
12232 
12233 /*
12234  * wm_sgmii_readreg:	[mii interface function]
12235  *
12236  *	Read a PHY register on the SGMII
12237  * This could be handled by the PHY layer if we didn't have to lock the
12238  * resource ...
12239  */
12240 static int
12241 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12242 {
12243 	struct wm_softc *sc = device_private(dev);
12244 	int rv;
12245 
12246 	if (sc->phy.acquire(sc)) {
12247 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12248 		return -1;
12249 	}
12250 
12251 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12252 
12253 	sc->phy.release(sc);
12254 	return rv;
12255 }
12256 
12257 static int
12258 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12259 {
12260 	struct wm_softc *sc = device_private(dev);
12261 	uint32_t i2ccmd;
12262 	int i, rv = 0;
12263 
12264 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12265 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12266 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12267 
12268 	/* Poll the ready bit */
12269 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12270 		delay(50);
12271 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12272 		if (i2ccmd & I2CCMD_READY)
12273 			break;
12274 	}
12275 	if ((i2ccmd & I2CCMD_READY) == 0) {
12276 		device_printf(dev, "I2CCMD Read did not complete\n");
12277 		rv = ETIMEDOUT;
12278 	}
12279 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
12280 		if (!sc->phy.no_errprint)
12281 			device_printf(dev, "I2CCMD Error bit set\n");
12282 		rv = EIO;
12283 	}
12284 
12285 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12286 
12287 	return rv;
12288 }
12289 
12290 /*
12291  * wm_sgmii_writereg:	[mii interface function]
12292  *
12293  *	Write a PHY register on the SGMII.
12294  * This could be handled by the PHY layer if we didn't have to lock the
12295  * resource ...
12296  */
12297 static int
12298 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12299 {
12300 	struct wm_softc *sc = device_private(dev);
12301 	int rv;
12302 
12303 	if (sc->phy.acquire(sc) != 0) {
12304 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12305 		return -1;
12306 	}
12307 
12308 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12309 
12310 	sc->phy.release(sc);
12311 
12312 	return rv;
12313 }
12314 
12315 static int
12316 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12317 {
12318 	struct wm_softc *sc = device_private(dev);
12319 	uint32_t i2ccmd;
12320 	uint16_t swapdata;
12321 	int rv = 0;
12322 	int i;
12323 
12324 	/* Swap the data bytes for the I2C interface */
12325 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12326 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12327 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12328 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12329 
12330 	/* Poll the ready bit */
12331 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12332 		delay(50);
12333 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12334 		if (i2ccmd & I2CCMD_READY)
12335 			break;
12336 	}
12337 	if ((i2ccmd & I2CCMD_READY) == 0) {
12338 		device_printf(dev, "I2CCMD Write did not complete\n");
12339 		rv = ETIMEDOUT;
12340 	}
12341 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
12342 		device_printf(dev, "I2CCMD Error bit set\n");
12343 		rv = EIO;
12344 	}
12345 
12346 	return rv;
12347 }
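
/*
 * Worked example of the byte swap above: the I2C interface transfers
 * the register value most-significant byte first, so a logical value
 * of 0x1234 goes out as swapdata = 0x3412, and a raw read of 0x3412
 * is unswapped back to 0x1234 by the mirror expression in
 * wm_sgmii_readreg_locked().
 */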
12348 
12349 /* TBI related */
12350 
12351 static bool
12352 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12353 {
12354 	bool sig;
12355 
12356 	sig = ctrl & CTRL_SWDPIN(1);
12357 
12358 	/*
12359 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12360 	 * detect a signal, 1 if they don't.
12361 	 */
12362 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12363 		sig = !sig;
12364 
12365 	return sig;
12366 }
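
/*
 * Worked example of the inversion above: on an 82544 whose optics do
 * detect a signal, CTRL_SWDPIN(1) reads as 0, so sig starts out false
 * and the inversion makes wm_tbi_havesignal() return true as desired.
 */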
12367 
12368 /*
12369  * wm_tbi_mediainit:
12370  *
12371  *	Initialize media for use on 1000BASE-X devices.
12372  */
12373 static void
12374 wm_tbi_mediainit(struct wm_softc *sc)
12375 {
12376 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12377 	const char *sep = "";
12378 
12379 	if (sc->sc_type < WM_T_82543)
12380 		sc->sc_tipg = TIPG_WM_DFLT;
12381 	else
12382 		sc->sc_tipg = TIPG_LG_DFLT;
12383 
12384 	sc->sc_tbi_serdes_anegticks = 5;
12385 
12386 	/* Initialize our media structures */
12387 	sc->sc_mii.mii_ifp = ifp;
12388 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
12389 
12390 	ifp->if_baudrate = IF_Gbps(1);
12391 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12392 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12393 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12394 		    wm_serdes_mediachange, wm_serdes_mediastatus,
12395 		    sc->sc_core_lock);
12396 	} else {
12397 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12398 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12399 	}
12400 
12401 	/*
12402 	 * SWD Pins:
12403 	 *
12404 	 *	0 = Link LED (output)
12405 	 *	1 = Loss Of Signal (input)
12406 	 */
12407 	sc->sc_ctrl |= CTRL_SWDPIO(0);
12408 
12409 	/* XXX Perhaps this is only for TBI */
12410 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12411 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12412 
12413 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12414 		sc->sc_ctrl &= ~CTRL_LRST;
12415 
12416 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12417 
12418 #define	ADD(ss, mm, dd)							\
12419 do {									\
12420 	aprint_normal("%s%s", sep, ss);					\
12421 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12422 	sep = ", ";							\
12423 } while (/*CONSTCOND*/0)
12424 
12425 	aprint_normal_dev(sc->sc_dev, "");
12426 
12427 	if (sc->sc_type == WM_T_I354) {
12428 		uint32_t status;
12429 
12430 		status = CSR_READ(sc, WMREG_STATUS);
12431 		if (((status & STATUS_2P5_SKU) != 0)
12432 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12433 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12434 		} else
12435 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12436 	} else if (sc->sc_type == WM_T_82545) {
12437 		/* Only 82545 is LX (XXX except SFP) */
12438 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12439 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12440 	} else if (sc->sc_sfptype != 0) {
12441 		/* XXX wm(4) fiber/serdes don't use ifm_data */
12442 		switch (sc->sc_sfptype) {
12443 		default:
12444 		case SFF_SFP_ETH_FLAGS_1000SX:
12445 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12446 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12447 			break;
12448 		case SFF_SFP_ETH_FLAGS_1000LX:
12449 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12450 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12451 			break;
12452 		case SFF_SFP_ETH_FLAGS_1000CX:
12453 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12454 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12455 			break;
12456 		case SFF_SFP_ETH_FLAGS_1000T:
12457 			ADD("1000baseT", IFM_1000_T, 0);
12458 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12459 			break;
12460 		case SFF_SFP_ETH_FLAGS_100FX:
12461 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
12462 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12463 			break;
12464 		}
12465 	} else {
12466 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12467 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12468 	}
12469 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12470 	aprint_normal("\n");
12471 
12472 #undef ADD
12473 
12474 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12475 }
12476 
12477 /*
12478  * wm_tbi_mediachange:	[ifmedia interface function]
12479  *
12480  *	Set hardware to newly-selected media on a 1000BASE-X device.
12481  */
12482 static int
12483 wm_tbi_mediachange(struct ifnet *ifp)
12484 {
12485 	struct wm_softc *sc = ifp->if_softc;
12486 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12487 	uint32_t status, ctrl;
12488 	bool signal;
12489 	int i;
12490 
12491 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12492 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12493 		/* XXX need some work for >= 82571 and < 82575 */
12494 		if (sc->sc_type < WM_T_82575)
12495 			return 0;
12496 	}
12497 
12498 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12499 	    || (sc->sc_type >= WM_T_82575))
12500 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12501 
12502 	sc->sc_ctrl &= ~CTRL_LRST;
12503 	sc->sc_txcw = TXCW_ANE;
12504 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12505 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
12506 	else if (ife->ifm_media & IFM_FDX)
12507 		sc->sc_txcw |= TXCW_FD;
12508 	else
12509 		sc->sc_txcw |= TXCW_HD;
12510 
12511 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12512 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12513 
12514 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
12515 		device_xname(sc->sc_dev), sc->sc_txcw));
12516 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12517 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12518 	CSR_WRITE_FLUSH(sc);
12519 	delay(1000);
12520 
12521 	ctrl = CSR_READ(sc, WMREG_CTRL);
12522 	signal = wm_tbi_havesignal(sc, ctrl);
12523 
12524 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n",
12525 		device_xname(sc->sc_dev), signal));
12526 
12527 	if (signal) {
12528 		/* Have signal; wait for the link to come up. */
12529 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12530 			delay(10000);
12531 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12532 				break;
12533 		}
12534 
12535 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
12536 			device_xname(sc->sc_dev), i));
12537 
12538 		status = CSR_READ(sc, WMREG_STATUS);
12539 		DPRINTF(sc, WM_DEBUG_LINK,
12540 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
12541 			device_xname(sc->sc_dev), status, (uint32_t)STATUS_LU));
12542 		if (status & STATUS_LU) {
12543 			/* Link is up. */
12544 			DPRINTF(sc, WM_DEBUG_LINK,
12545 			    ("%s: LINK: set media -> link up %s\n",
12546 				device_xname(sc->sc_dev),
12547 				(status & STATUS_FD) ? "FDX" : "HDX"));
12548 
12549 			/*
12550 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
12551 			 * automatically, so refresh sc->sc_ctrl from it.
12552 			 */
12553 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12554 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12555 			sc->sc_fcrtl &= ~FCRTL_XONE;
12556 			if (status & STATUS_FD)
12557 				sc->sc_tctl |=
12558 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12559 			else
12560 				sc->sc_tctl |=
12561 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12562 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12563 				sc->sc_fcrtl |= FCRTL_XONE;
12564 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12565 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12566 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12567 			sc->sc_tbi_linkup = 1;
12568 		} else {
12569 			if (i == WM_LINKUP_TIMEOUT)
12570 				wm_check_for_link(sc);
12571 			/* Link is down. */
12572 			DPRINTF(sc, WM_DEBUG_LINK,
12573 			    ("%s: LINK: set media -> link down\n",
12574 				device_xname(sc->sc_dev)));
12575 			sc->sc_tbi_linkup = 0;
12576 		}
12577 	} else {
12578 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
12579 			device_xname(sc->sc_dev)));
12580 		sc->sc_tbi_linkup = 0;
12581 	}
12582 
12583 	wm_tbi_serdes_set_linkled(sc);
12584 
12585 	return 0;
12586 }
12587 
12588 /*
12589  * wm_tbi_mediastatus:	[ifmedia interface function]
12590  *
12591  *	Get the current interface media status on a 1000BASE-X device.
12592  */
12593 static void
12594 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12595 {
12596 	struct wm_softc *sc = ifp->if_softc;
12597 	uint32_t ctrl, status;
12598 
12599 	ifmr->ifm_status = IFM_AVALID;
12600 	ifmr->ifm_active = IFM_ETHER;
12601 
12602 	status = CSR_READ(sc, WMREG_STATUS);
12603 	if ((status & STATUS_LU) == 0) {
12604 		ifmr->ifm_active |= IFM_NONE;
12605 		return;
12606 	}
12607 
12608 	ifmr->ifm_status |= IFM_ACTIVE;
12609 	/* Only 82545 is LX */
12610 	if (sc->sc_type == WM_T_82545)
12611 		ifmr->ifm_active |= IFM_1000_LX;
12612 	else
12613 		ifmr->ifm_active |= IFM_1000_SX;
12614 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12615 		ifmr->ifm_active |= IFM_FDX;
12616 	else
12617 		ifmr->ifm_active |= IFM_HDX;
12618 	ctrl = CSR_READ(sc, WMREG_CTRL);
12619 	if (ctrl & CTRL_RFCE)
12620 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12621 	if (ctrl & CTRL_TFCE)
12622 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12623 }
12624 
12625 /* XXX TBI only */
12626 static int
12627 wm_check_for_link(struct wm_softc *sc)
12628 {
12629 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12630 	uint32_t rxcw;
12631 	uint32_t ctrl;
12632 	uint32_t status;
12633 	bool signal;
12634 
12635 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
12636 		device_xname(sc->sc_dev), __func__));
12637 
12638 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12639 		/* XXX need some work for >= 82571 */
12640 		if (sc->sc_type >= WM_T_82571) {
12641 			sc->sc_tbi_linkup = 1;
12642 			return 0;
12643 		}
12644 	}
12645 
12646 	rxcw = CSR_READ(sc, WMREG_RXCW);
12647 	ctrl = CSR_READ(sc, WMREG_CTRL);
12648 	status = CSR_READ(sc, WMREG_STATUS);
12649 	signal = wm_tbi_havesignal(sc, ctrl);
12650 
12651 	DPRINTF(sc, WM_DEBUG_LINK,
12652 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12653 		device_xname(sc->sc_dev), __func__, signal,
12654 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12655 
12656 	/*
12657 	 * SWDPIN   LU RXCW
12658 	 *	0    0	  0
12659 	 *	0    0	  1	(should not happen)
12660 	 *	0    1	  0	(should not happen)
12661 	 *	0    1	  1	(should not happen)
12662 	 *	1    0	  0	Disable autonego and force linkup
12663 	 *	1    0	  1	got /C/ but not linkup yet
12664 	 *	1    1	  0	(linkup)
12665 	 *	1    1	  1	If IFM_AUTO, back to autonego
12666 	 *
12667 	 */
12668 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12669 		DPRINTF(sc, WM_DEBUG_LINK,
12670 		    ("%s: %s: force linkup and fullduplex\n",
12671 			device_xname(sc->sc_dev), __func__));
12672 		sc->sc_tbi_linkup = 0;
12673 		/* Disable auto-negotiation in the TXCW register */
12674 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12675 
12676 		/*
12677 		 * Force link-up and also force full-duplex.
12678 		 *
12679 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
12680 		 * automatically, so refresh sc->sc_ctrl from it.
12681 		 */
12682 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12683 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12684 	} else if (((status & STATUS_LU) != 0)
12685 	    && ((rxcw & RXCW_C) != 0)
12686 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12687 		sc->sc_tbi_linkup = 1;
12688 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12689 			device_xname(sc->sc_dev),
12690 			__func__));
12691 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12692 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12693 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
12694 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/\n",
12695 			device_xname(sc->sc_dev), __func__));
12696 	} else {
12697 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12698 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12699 			status));
12700 	}
12701 
12702 	return 0;
12703 }
12704 
12705 /*
12706  * wm_tbi_tick:
12707  *
12708  *	Check the link on TBI devices.
12709  *	This function acts as mii_tick().
12710  */
12711 static void
12712 wm_tbi_tick(struct wm_softc *sc)
12713 {
12714 	struct mii_data *mii = &sc->sc_mii;
12715 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12716 	uint32_t status;
12717 
12718 	KASSERT(WM_CORE_LOCKED(sc));
12719 
12720 	status = CSR_READ(sc, WMREG_STATUS);
12721 
12722 	/* XXX is this needed? */
12723 	(void)CSR_READ(sc, WMREG_RXCW);
12724 	(void)CSR_READ(sc, WMREG_CTRL);
12725 
12726 	/* set link status */
12727 	if ((status & STATUS_LU) == 0) {
12728 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12729 			device_xname(sc->sc_dev)));
12730 		sc->sc_tbi_linkup = 0;
12731 	} else if (sc->sc_tbi_linkup == 0) {
12732 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12733 			device_xname(sc->sc_dev),
12734 			(status & STATUS_FD) ? "FDX" : "HDX"));
12735 		sc->sc_tbi_linkup = 1;
12736 		sc->sc_tbi_serdes_ticks = 0;
12737 	}
12738 
12739 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12740 		goto setled;
12741 
12742 	if ((status & STATUS_LU) == 0) {
12743 		sc->sc_tbi_linkup = 0;
12744 		/* If the timer expired, retry autonegotiation */
12745 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12746 		    && (++sc->sc_tbi_serdes_ticks
12747 			>= sc->sc_tbi_serdes_anegticks)) {
12748 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12749 				device_xname(sc->sc_dev), __func__));
12750 			sc->sc_tbi_serdes_ticks = 0;
12751 			/*
12752 			 * Reset the link, and let autonegotiation do
12753 			 * its thing
12754 			 */
12755 			sc->sc_ctrl |= CTRL_LRST;
12756 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12757 			CSR_WRITE_FLUSH(sc);
12758 			delay(1000);
12759 			sc->sc_ctrl &= ~CTRL_LRST;
12760 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12761 			CSR_WRITE_FLUSH(sc);
12762 			delay(1000);
12763 			CSR_WRITE(sc, WMREG_TXCW,
12764 			    sc->sc_txcw & ~TXCW_ANE);
12765 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12766 		}
12767 	}
12768 
12769 setled:
12770 	wm_tbi_serdes_set_linkled(sc);
12771 }
12772 
12773 /* SERDES related */
12774 static void
12775 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12776 {
12777 	uint32_t reg;
12778 
12779 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12780 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12781 		return;
12782 
12783 	/* Enable PCS to turn on link */
12784 	reg = CSR_READ(sc, WMREG_PCS_CFG);
12785 	reg |= PCS_CFG_PCS_EN;
12786 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12787 
12788 	/* Power up the laser */
12789 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12790 	reg &= ~CTRL_EXT_SWDPIN(3);
12791 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12792 
12793 	/* Flush the write to verify completion */
12794 	CSR_WRITE_FLUSH(sc);
12795 	delay(1000);
12796 }
12797 
12798 static int
12799 wm_serdes_mediachange(struct ifnet *ifp)
12800 {
12801 	struct wm_softc *sc = ifp->if_softc;
12802 	bool pcs_autoneg = true; /* XXX */
12803 	uint32_t ctrl_ext, pcs_lctl, reg;
12804 
12805 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12806 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12807 		return 0;
12808 
12809 	/* XXX Currently, this function is not called on 8257[12] */
12810 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12811 	    || (sc->sc_type >= WM_T_82575))
12812 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12813 
12814 	/* Power on the sfp cage if present */
12815 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12816 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12817 	ctrl_ext |= CTRL_EXT_I2C_ENA;
12818 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12819 
12820 	sc->sc_ctrl |= CTRL_SLU;
12821 
12822 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
12823 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12824 
12825 		reg = CSR_READ(sc, WMREG_CONNSW);
12826 		reg |= CONNSW_ENRGSRC;
12827 		CSR_WRITE(sc, WMREG_CONNSW, reg);
12828 	}
12829 
12830 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12831 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12832 	case CTRL_EXT_LINK_MODE_SGMII:
12833 		/* SGMII mode lets the phy handle forcing speed/duplex */
12834 		pcs_autoneg = true;
12835 		/* Autoneg time out should be disabled for SGMII mode */
12836 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12837 		break;
12838 	case CTRL_EXT_LINK_MODE_1000KX:
12839 		pcs_autoneg = false;
12840 		/* FALLTHROUGH */
12841 	default:
12842 		if ((sc->sc_type == WM_T_82575)
12843 		    || (sc->sc_type == WM_T_82576)) {
12844 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12845 				pcs_autoneg = false;
12846 		}
12847 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12848 		    | CTRL_FRCFDX;
12849 
12850 		/* Set speed of 1000/Full if speed/duplex is forced */
12851 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12852 	}
12853 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12854 
12855 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12856 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12857 
12858 	if (pcs_autoneg) {
12859 		/* Set PCS register for autoneg */
12860 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12861 
12862 		/* Disable force flow control for autoneg */
12863 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12864 
12865 		/* Configure flow control advertisement for autoneg */
12866 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
12867 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12868 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12869 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12870 	} else
12871 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12872 
12873 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12874 
12875 	return 0;
12876 }
12877 
12878 static void
12879 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12880 {
12881 	struct wm_softc *sc = ifp->if_softc;
12882 	struct mii_data *mii = &sc->sc_mii;
12883 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12884 	uint32_t pcs_adv, pcs_lpab, reg;
12885 
12886 	ifmr->ifm_status = IFM_AVALID;
12887 	ifmr->ifm_active = IFM_ETHER;
12888 
12889 	/* Check PCS */
12890 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12891 	if ((reg & PCS_LSTS_LINKOK) == 0) {
12892 		ifmr->ifm_active |= IFM_NONE;
12893 		sc->sc_tbi_linkup = 0;
12894 		goto setled;
12895 	}
12896 
12897 	sc->sc_tbi_linkup = 1;
12898 	ifmr->ifm_status |= IFM_ACTIVE;
12899 	if (sc->sc_type == WM_T_I354) {
12900 		uint32_t status;
12901 
12902 		status = CSR_READ(sc, WMREG_STATUS);
12903 		if (((status & STATUS_2P5_SKU) != 0)
12904 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12905 			ifmr->ifm_active |= IFM_2500_KX;
12906 		} else
12907 			ifmr->ifm_active |= IFM_1000_KX;
12908 	} else {
12909 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12910 		case PCS_LSTS_SPEED_10:
12911 			ifmr->ifm_active |= IFM_10_T; /* XXX */
12912 			break;
12913 		case PCS_LSTS_SPEED_100:
12914 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
12915 			break;
12916 		case PCS_LSTS_SPEED_1000:
12917 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12918 			break;
12919 		default:
12920 			device_printf(sc->sc_dev, "Unknown speed\n");
12921 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12922 			break;
12923 		}
12924 	}
12925 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
12926 	if ((reg & PCS_LSTS_FDX) != 0)
12927 		ifmr->ifm_active |= IFM_FDX;
12928 	else
12929 		ifmr->ifm_active |= IFM_HDX;
12930 	mii->mii_media_active &= ~IFM_ETH_FMASK;
12931 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12932 		/* Check flow */
12933 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
12934 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
12935 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12936 			goto setled;
12937 		}
12938 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12939 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12940 		DPRINTF(sc, WM_DEBUG_LINK,
12941 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12942 		if ((pcs_adv & TXCW_SYM_PAUSE)
12943 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
12944 			mii->mii_media_active |= IFM_FLOW
12945 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12946 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12947 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12948 		    && (pcs_lpab & TXCW_SYM_PAUSE)
12949 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12950 			mii->mii_media_active |= IFM_FLOW
12951 			    | IFM_ETH_TXPAUSE;
12952 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
12953 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12954 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12955 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12956 			mii->mii_media_active |= IFM_FLOW
12957 			    | IFM_ETH_RXPAUSE;
12958 		}
12959 	}
12960 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12961 	    | (mii->mii_media_active & IFM_ETH_FMASK);
12962 setled:
12963 	wm_tbi_serdes_set_linkled(sc);
12964 }
12965 
12966 /*
12967  * wm_serdes_tick:
12968  *
12969  *	Check the link on serdes devices.
12970  */
12971 static void
12972 wm_serdes_tick(struct wm_softc *sc)
12973 {
12974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12975 	struct mii_data *mii = &sc->sc_mii;
12976 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12977 	uint32_t reg;
12978 
12979 	KASSERT(WM_CORE_LOCKED(sc));
12980 
12981 	mii->mii_media_status = IFM_AVALID;
12982 	mii->mii_media_active = IFM_ETHER;
12983 
12984 	/* Check PCS */
12985 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12986 	if ((reg & PCS_LSTS_LINKOK) != 0) {
12987 		mii->mii_media_status |= IFM_ACTIVE;
12988 		sc->sc_tbi_linkup = 1;
12989 		sc->sc_tbi_serdes_ticks = 0;
12990 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
12991 		if ((reg & PCS_LSTS_FDX) != 0)
12992 			mii->mii_media_active |= IFM_FDX;
12993 		else
12994 			mii->mii_media_active |= IFM_HDX;
12995 	} else {
12996 		mii->mii_media_active |= IFM_NONE;
12997 		sc->sc_tbi_linkup = 0;
12998 		/* If the timer expired, retry autonegotiation */
12999 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
13000 		    && (++sc->sc_tbi_serdes_ticks
13001 			>= sc->sc_tbi_serdes_anegticks)) {
13002 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
13003 				device_xname(sc->sc_dev), __func__));
13004 			sc->sc_tbi_serdes_ticks = 0;
13005 			/* XXX */
13006 			wm_serdes_mediachange(ifp);
13007 		}
13008 	}
13009 
13010 	wm_tbi_serdes_set_linkled(sc);
13011 }
13012 
13013 /* SFP related */
13014 
13015 static int
13016 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
13017 {
13018 	uint32_t i2ccmd;
13019 	int i;
13020 
13021 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
13022 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
13023 
13024 	/* Poll the ready bit */
13025 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
13026 		delay(50);
13027 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
13028 		if (i2ccmd & I2CCMD_READY)
13029 			break;
13030 	}
13031 	if ((i2ccmd & I2CCMD_READY) == 0)
13032 		return -1;
13033 	if ((i2ccmd & I2CCMD_ERROR) != 0)
13034 		return -1;
13035 
13036 	*data = i2ccmd & 0x00ff;
13037 
13038 	return 0;
13039 }
13040 
13041 static uint32_t
13042 wm_sfp_get_media_type(struct wm_softc *sc)
13043 {
13044 	uint32_t ctrl_ext;
13045 	uint8_t val = 0;
13046 	int timeout = 3;
13047 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
13048 	int rv = -1;
13049 
13050 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
13051 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
13052 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
13053 	CSR_WRITE_FLUSH(sc);
13054 
13055 	/* Read SFP module data */
13056 	while (timeout) {
13057 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
13058 		if (rv == 0)
13059 			break;
13060 		delay(100*1000); /* XXX too big */
13061 		timeout--;
13062 	}
13063 	if (rv != 0)
13064 		goto out;
13065 
13066 	switch (val) {
13067 	case SFF_SFP_ID_SFF:
13068 		aprint_normal_dev(sc->sc_dev,
13069 		    "Module/Connector soldered to board\n");
13070 		break;
13071 	case SFF_SFP_ID_SFP:
13072 		sc->sc_flags |= WM_F_SFP;
13073 		break;
13074 	case SFF_SFP_ID_UNKNOWN:
13075 		goto out;
13076 	default:
13077 		break;
13078 	}
13079 
13080 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
13081 	if (rv != 0)
13082 		goto out;
13083 
13084 	sc->sc_sfptype = val;
13085 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
13086 		mediatype = WM_MEDIATYPE_SERDES;
13087 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
13088 		sc->sc_flags |= WM_F_SGMII;
13089 		mediatype = WM_MEDIATYPE_COPPER;
13090 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
13091 		sc->sc_flags |= WM_F_SGMII;
13092 		mediatype = WM_MEDIATYPE_SERDES;
13093 	} else {
13094 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
13095 		    __func__, sc->sc_sfptype);
13096 		sc->sc_sfptype = 0; /* XXX unknown */
13097 	}
13098 
13099 out:
13100 	/* Restore I2C interface setting */
13101 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
13102 
13103 	return mediatype;
13104 }
13105 
13106 /*
13107  * NVM related.
13108  * Microwire, SPI (w/wo EERD) and Flash.
13109  */
13110 
13111 /* Both spi and uwire */
13112 
13113 /*
13114  * wm_eeprom_sendbits:
13115  *
13116  *	Send a series of bits to the EEPROM.
13117  */
13118 static void
13119 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
13120 {
13121 	uint32_t reg;
13122 	int x;
13123 
13124 	reg = CSR_READ(sc, WMREG_EECD);
13125 
13126 	for (x = nbits; x > 0; x--) {
13127 		if (bits & (1U << (x - 1)))
13128 			reg |= EECD_DI;
13129 		else
13130 			reg &= ~EECD_DI;
13131 		CSR_WRITE(sc, WMREG_EECD, reg);
13132 		CSR_WRITE_FLUSH(sc);
13133 		delay(2);
13134 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13135 		CSR_WRITE_FLUSH(sc);
13136 		delay(2);
13137 		CSR_WRITE(sc, WMREG_EECD, reg);
13138 		CSR_WRITE_FLUSH(sc);
13139 		delay(2);
13140 	}
13141 }
13142 
13143 /*
13144  * wm_eeprom_recvbits:
13145  *
13146  *	Receive a series of bits from the EEPROM.
13147  */
13148 static void
13149 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
13150 {
13151 	uint32_t reg, val;
13152 	int x;
13153 
13154 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13155 
13156 	val = 0;
13157 	for (x = nbits; x > 0; x--) {
13158 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13159 		CSR_WRITE_FLUSH(sc);
13160 		delay(2);
13161 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13162 			val |= (1U << (x - 1));
13163 		CSR_WRITE(sc, WMREG_EECD, reg);
13164 		CSR_WRITE_FLUSH(sc);
13165 		delay(2);
13166 	}
13167 	*valp = val;
13168 }
13169 
13170 /* Microwire */
13171 
13172 /*
13173  * wm_nvm_read_uwire:
13174  *
13175  *	Read a word from the EEPROM using the MicroWire protocol.
13176  */
13177 static int
13178 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13179 {
13180 	uint32_t reg, val;
13181 	int i;
13182 
13183 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13184 		device_xname(sc->sc_dev), __func__));
13185 
13186 	if (sc->nvm.acquire(sc) != 0)
13187 		return -1;
13188 
13189 	for (i = 0; i < wordcnt; i++) {
13190 		/* Clear SK and DI. */
13191 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13192 		CSR_WRITE(sc, WMREG_EECD, reg);
13193 
13194 		/*
13195 		 * XXX: workaround for a bug in qemu-0.12.x and prior
13196 		 * and Xen.
13197 		 *
13198 		 * We use this workaround only for 82540 because qemu's
13199 		 * e1000 acts as 82540.
13200 		 */
13201 		if (sc->sc_type == WM_T_82540) {
13202 			reg |= EECD_SK;
13203 			CSR_WRITE(sc, WMREG_EECD, reg);
13204 			reg &= ~EECD_SK;
13205 			CSR_WRITE(sc, WMREG_EECD, reg);
13206 			CSR_WRITE_FLUSH(sc);
13207 			delay(2);
13208 		}
13209 		/* XXX: end of workaround */
13210 
13211 		/* Set CHIP SELECT. */
13212 		reg |= EECD_CS;
13213 		CSR_WRITE(sc, WMREG_EECD, reg);
13214 		CSR_WRITE_FLUSH(sc);
13215 		delay(2);
13216 
13217 		/* Shift in the READ command. */
13218 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13219 
13220 		/* Shift in address. */
13221 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13222 
13223 		/* Shift out the data. */
13224 		wm_eeprom_recvbits(sc, &val, 16);
13225 		data[i] = val & 0xffff;
13226 
13227 		/* Clear CHIP SELECT. */
13228 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13229 		CSR_WRITE(sc, WMREG_EECD, reg);
13230 		CSR_WRITE_FLUSH(sc);
13231 		delay(2);
13232 	}
13233 
13234 	sc->nvm.release(sc);
13235 	return 0;
13236 }
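
/*
 * Usage sketch (not compiled): fetching the three station address
 * words from a Microwire EEPROM.  NVM_OFF_MACADDR is the word offset
 * used elsewhere in this file; error handling is omitted for brevity.
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
		/* myea[0..2] now hold the EEPROM's MAC address words. */
	}
#endif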
13237 
13238 /* SPI */
13239 
13240 /*
13241  * Set SPI and FLASH related information from the EECD register.
13242  * For 82541 and 82547, the word size is taken from EEPROM.
13243  */
13244 static int
13245 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13246 {
13247 	int size;
13248 	uint32_t reg;
13249 	uint16_t data;
13250 
13251 	reg = CSR_READ(sc, WMREG_EECD);
13252 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13253 
13254 	/* Read the size of NVM from EECD by default */
13255 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13256 	switch (sc->sc_type) {
13257 	case WM_T_82541:
13258 	case WM_T_82541_2:
13259 	case WM_T_82547:
13260 	case WM_T_82547_2:
13261 		/* Set dummy value to access EEPROM */
13262 		sc->sc_nvm_wordsize = 64;
13263 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13264 			aprint_error_dev(sc->sc_dev,
13265 			    "%s: failed to read EEPROM size\n", __func__);
13266 		}
13267 		reg = data;
13268 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13269 		if (size == 0)
13270 			size = 6; /* 64 words */
13271 		else
13272 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13273 		break;
13274 	case WM_T_80003:
13275 	case WM_T_82571:
13276 	case WM_T_82572:
13277 	case WM_T_82573: /* SPI case */
13278 	case WM_T_82574: /* SPI case */
13279 	case WM_T_82583: /* SPI case */
13280 		size += NVM_WORD_SIZE_BASE_SHIFT;
13281 		if (size > 14)
13282 			size = 14;
13283 		break;
13284 	case WM_T_82575:
13285 	case WM_T_82576:
13286 	case WM_T_82580:
13287 	case WM_T_I350:
13288 	case WM_T_I354:
13289 	case WM_T_I210:
13290 	case WM_T_I211:
13291 		size += NVM_WORD_SIZE_BASE_SHIFT;
13292 		if (size > 15)
13293 			size = 15;
13294 		break;
13295 	default:
13296 		aprint_error_dev(sc->sc_dev,
13297 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13298 		return -1;
13299 		break;
13300 	}
13301 
13302 	sc->sc_nvm_wordsize = 1 << size;
13303 
13304 	return 0;
13305 }
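
/*
 * Worked example of the size computation above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 as defined in if_wmreg.h: an
 * EECD_EE_SIZE_EX field of 2 on an 82571 gives size = 2 + 6 = 8,
 * so sc_nvm_wordsize becomes 1 << 8 = 256 words (512 bytes).
 */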
13306 
13307 /*
13308  * wm_nvm_ready_spi:
13309  *
13310  *	Wait for a SPI EEPROM to be ready for commands.
13311  */
13312 static int
13313 wm_nvm_ready_spi(struct wm_softc *sc)
13314 {
13315 	uint32_t val;
13316 	int usec;
13317 
13318 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13319 		device_xname(sc->sc_dev), __func__));
13320 
13321 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13322 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13323 		wm_eeprom_recvbits(sc, &val, 8);
13324 		if ((val & SPI_SR_RDY) == 0)
13325 			break;
13326 	}
13327 	if (usec >= SPI_MAX_RETRIES) {
13328 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
13329 		return -1;
13330 	}
13331 	return 0;
13332 }
13333 
13334 /*
13335  * wm_nvm_read_spi:
13336  *
13337  *	Read a word from the EEPROM using the SPI protocol.
13338  */
13339 static int
13340 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13341 {
13342 	uint32_t reg, val;
13343 	int i;
13344 	uint8_t opc;
13345 	int rv = 0;
13346 
13347 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13348 		device_xname(sc->sc_dev), __func__));
13349 
13350 	if (sc->nvm.acquire(sc) != 0)
13351 		return -1;
13352 
13353 	/* Clear SK and CS. */
13354 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13355 	CSR_WRITE(sc, WMREG_EECD, reg);
13356 	CSR_WRITE_FLUSH(sc);
13357 	delay(2);
13358 
13359 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
13360 		goto out;
13361 
13362 	/* Toggle CS to flush commands. */
13363 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13364 	CSR_WRITE_FLUSH(sc);
13365 	delay(2);
13366 	CSR_WRITE(sc, WMREG_EECD, reg);
13367 	CSR_WRITE_FLUSH(sc);
13368 	delay(2);
13369 
13370 	opc = SPI_OPC_READ;
13371 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
13372 		opc |= SPI_OPC_A8;
13373 
13374 	wm_eeprom_sendbits(sc, opc, 8);
13375 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13376 
13377 	for (i = 0; i < wordcnt; i++) {
13378 		wm_eeprom_recvbits(sc, &val, 16);
13379 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13380 	}
13381 
13382 	/* Raise CS and clear SK. */
13383 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13384 	CSR_WRITE(sc, WMREG_EECD, reg);
13385 	CSR_WRITE_FLUSH(sc);
13386 	delay(2);
13387 
13388 out:
13389 	sc->nvm.release(sc);
13390 	return rv;
13391 }
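
/*
 * Note on the 8-bit address case above: small SPI parts take only an
 * 8-bit address and use the SPI_OPC_A8 opcode bit as address bit 8,
 * which is why it is ORed in for word offsets >= 128 (byte offsets
 * >= 256 after the "word << 1" conversion).
 */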
13392 
13393 /* Using with EERD */
13394 
13395 static int
13396 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13397 {
13398 	uint32_t attempts = 100000;
13399 	uint32_t i, reg = 0;
13400 	int32_t done = -1;
13401 
13402 	for (i = 0; i < attempts; i++) {
13403 		reg = CSR_READ(sc, rw);
13404 
13405 		if (reg & EERD_DONE) {
13406 			done = 0;
13407 			break;
13408 		}
13409 		delay(5);
13410 	}
13411 
13412 	return done;
13413 }
13414 
13415 static int
13416 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13417 {
13418 	int i, eerd = 0;
13419 	int rv = 0;
13420 
13421 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13422 		device_xname(sc->sc_dev), __func__));
13423 
13424 	if (sc->nvm.acquire(sc) != 0)
13425 		return -1;
13426 
13427 	for (i = 0; i < wordcnt; i++) {
13428 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13429 		CSR_WRITE(sc, WMREG_EERD, eerd);
13430 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13431 		if (rv != 0) {
13432 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13433 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
13434 			break;
13435 		}
13436 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13437 	}
13438 
13439 	sc->nvm.release(sc);
13440 	return rv;
13441 }
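
/*
 * Sketch (not compiled) of one EERD transaction as performed in the
 * loop above: start the read, poll for completion, then pull the word
 * out of the upper bits of the same register.
 */
#if 0
	uint16_t word;

	CSR_WRITE(sc, WMREG_EERD, (offset << EERD_ADDR_SHIFT) | EERD_START);
	if (wm_poll_eerd_eewr_done(sc, WMREG_EERD) == 0)
		word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif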
13442 
13443 /* Flash */
13444 
13445 static int
13446 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13447 {
13448 	uint32_t eecd;
13449 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13450 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13451 	uint32_t nvm_dword = 0;
13452 	uint8_t sig_byte = 0;
13453 	int rv;
13454 
13455 	switch (sc->sc_type) {
13456 	case WM_T_PCH_SPT:
13457 	case WM_T_PCH_CNP:
13458 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13459 		act_offset = ICH_NVM_SIG_WORD * 2;
13460 
13461 		/* Set bank to 0 in case flash read fails. */
13462 		*bank = 0;
13463 
13464 		/* Check bank 0 */
13465 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13466 		if (rv != 0)
13467 			return rv;
13468 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13469 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13470 			*bank = 0;
13471 			return 0;
13472 		}
13473 
13474 		/* Check bank 1 */
13475 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13476 		    &nvm_dword);
13477 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13478 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13479 			*bank = 1;
13480 			return 0;
13481 		}
13482 		aprint_error_dev(sc->sc_dev,
13483 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13484 		return -1;
13485 	case WM_T_ICH8:
13486 	case WM_T_ICH9:
13487 		eecd = CSR_READ(sc, WMREG_EECD);
13488 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13489 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13490 			return 0;
13491 		}
13492 		/* FALLTHROUGH */
13493 	default:
13494 		/* Default to 0 */
13495 		*bank = 0;
13496 
13497 		/* Check bank 0 */
13498 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
13499 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13500 			*bank = 0;
13501 			return 0;
13502 		}
13503 
13504 		/* Check bank 1 */
13505 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
13506 		    &sig_byte);
13507 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13508 			*bank = 1;
13509 			return 0;
13510 		}
13511 	}
13512 
13513 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13514 		device_xname(sc->sc_dev)));
13515 	return -1;
13516 }
13517 
13518 /******************************************************************************
13519  * This function does initial flash setup so that a new read/write/erase cycle
13520  * can be started.
13521  *
13522  * sc - The pointer to the hw structure
13523  ****************************************************************************/
13524 static int32_t
13525 wm_ich8_cycle_init(struct wm_softc *sc)
13526 {
13527 	uint16_t hsfsts;
13528 	int32_t error = 1;
13529 	int32_t i     = 0;
13530 
13531 	if (sc->sc_type >= WM_T_PCH_SPT)
13532 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13533 	else
13534 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13535 
13536 	/* Maybe check the Flash Descriptor Valid bit in HW status */
13537 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
13538 		return error;
13539 
13540 	/* Clear FCERR in Hw status by writing 1 */
13541 	/* Clear DAEL in Hw status by writing a 1 */
13542 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13543 
13544 	if (sc->sc_type >= WM_T_PCH_SPT)
13545 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13546 	else
13547 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13548 
13549 	/*
13550 	 * Either we should have a hardware SPI cycle-in-progress bit to
13551 	 * check against before starting a new cycle, or the FDONE bit
13552 	 * should be changed in the hardware so that it reads 1 after a
13553 	 * hardware reset, which could then indicate whether a cycle is in
13554 	 * progress or has completed.  We should also have some software
13555 	 * semaphore mechanism guarding FDONE or the cycle-in-progress
13556 	 * bit, so that accesses by two threads are serialized and two
13557 	 * threads cannot start a cycle at the same time.
13558 	 */
13559 
13560 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13561 		/*
13562 		 * There is no cycle running at present, so we can start a
13563 		 * cycle
13564 		 */
13565 
13566 		/* Begin by setting Flash Cycle Done. */
13567 		hsfsts |= HSFSTS_DONE;
13568 		if (sc->sc_type >= WM_T_PCH_SPT)
13569 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13570 			    hsfsts & 0xffffUL);
13571 		else
13572 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13573 		error = 0;
13574 	} else {
13575 		/*
13576 		 * Otherwise poll for sometime so the current cycle has a
13577 		 * chance to end before giving up.
13578 		 */
13579 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13580 			if (sc->sc_type >= WM_T_PCH_SPT)
13581 				hsfsts = ICH8_FLASH_READ32(sc,
13582 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13583 			else
13584 				hsfsts = ICH8_FLASH_READ16(sc,
13585 				    ICH_FLASH_HSFSTS);
13586 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13587 				error = 0;
13588 				break;
13589 			}
13590 			delay(1);
13591 		}
13592 		if (error == 0) {
13593 			/*
13594 			 * The previous cycle finished while we polled;
13595 			 * now set the Flash Cycle Done.
13596 			 */
13597 			hsfsts |= HSFSTS_DONE;
13598 			if (sc->sc_type >= WM_T_PCH_SPT)
13599 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13600 				    hsfsts & 0xffffUL);
13601 			else
13602 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13603 				    hsfsts);
13604 		}
13605 	}
13606 	return error;
13607 }
13608 
13609 /******************************************************************************
13610  * This function starts a flash cycle and waits for its completion
13611  *
13612  * sc - The pointer to the hw structure
13613  ****************************************************************************/
13614 static int32_t
13615 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13616 {
13617 	uint16_t hsflctl;
13618 	uint16_t hsfsts;
13619 	int32_t error = 1;
13620 	uint32_t i = 0;
13621 
13622 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13623 	if (sc->sc_type >= WM_T_PCH_SPT)
13624 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13625 	else
13626 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13627 	hsflctl |= HSFCTL_GO;
13628 	if (sc->sc_type >= WM_T_PCH_SPT)
13629 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13630 		    (uint32_t)hsflctl << 16);
13631 	else
13632 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13633 
13634 	/* Wait till FDONE bit is set to 1 */
13635 	do {
13636 		if (sc->sc_type >= WM_T_PCH_SPT)
13637 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13638 			    & 0xffffUL;
13639 		else
13640 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13641 		if (hsfsts & HSFSTS_DONE)
13642 			break;
13643 		delay(1);
13644 		i++;
13645 	} while (i < timeout);
13646 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
13647 		error = 0;
13648 
13649 	return error;
13650 }
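
/*
 * Note on the WM_T_PCH_SPT branches above: on SPT and newer the flash
 * registers live in LAN memory space and only 32-bit accesses are
 * valid; HSFSTS sits in the low 16 bits and HSFCTL in the high 16
 * bits of the same dword, which is what the "<< 16" and "& 0xffffUL"
 * manipulations select.
 */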
13651 
13652 /******************************************************************************
13653  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13654  *
13655  * sc - The pointer to the hw structure
13656  * index - The index of the byte or word to read.
13657  * size - Size of data to read: 1=byte, 2=word, 4=dword
13658  * data - Pointer to the word to store the value read.
13659  *****************************************************************************/
13660 static int32_t
13661 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13662     uint32_t size, uint32_t *data)
13663 {
13664 	uint16_t hsfsts;
13665 	uint16_t hsflctl;
13666 	uint32_t flash_linear_address;
13667 	uint32_t flash_data = 0;
13668 	int32_t error = 1;
13669 	int32_t count = 0;
13670 
13671 	if (size < 1 || size > 4 || data == NULL ||
13672 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
13673 		return error;
13674 
13675 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13676 	    sc->sc_ich8_flash_base;
13677 
13678 	do {
13679 		delay(1);
13680 		/* Steps */
13681 		error = wm_ich8_cycle_init(sc);
13682 		if (error)
13683 			break;
13684 
13685 		if (sc->sc_type >= WM_T_PCH_SPT)
13686 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13687 			    >> 16;
13688 		else
13689 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13690 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
13691 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13692 		    & HSFCTL_BCOUNT_MASK;
13693 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13694 		if (sc->sc_type >= WM_T_PCH_SPT) {
13695 			/*
13696 			 * On SPT, this register is in LAN memory space, not
13697 			 * flash, so only 32-bit access is supported.
13698 			 */
13699 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13700 			    (uint32_t)hsflctl << 16);
13701 		} else
13702 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13703 
13704 		/*
13705 		 * Write the last 24 bits of index into Flash Linear address
13706 		 * field in Flash Address
13707 		 */
13708 		/* TODO: TBD maybe check the index against the size of flash */
13709 
13710 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13711 
13712 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13713 
13714 		/*
13715 		 * If FCERR is set, clear it and retry the whole sequence a
13716 		 * few more times; otherwise read the result out of Flash
13717 		 * Data0, which presents the data least-significant byte
13718 		 * first.
13719 		 */
13720 		if (error == 0) {
13721 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13722 			if (size == 1)
13723 				*data = (uint8_t)(flash_data & 0x000000FF);
13724 			else if (size == 2)
13725 				*data = (uint16_t)(flash_data & 0x0000FFFF);
13726 			else if (size == 4)
13727 				*data = (uint32_t)flash_data;
13728 			break;
13729 		} else {
13730 			/*
13731 			 * If we've gotten here, then things are probably
13732 			 * completely hosed, but if the error condition is
13733 			 * detected, it won't hurt to give it another try...
13734 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13735 			 */
13736 			if (sc->sc_type >= WM_T_PCH_SPT)
13737 				hsfsts = ICH8_FLASH_READ32(sc,
13738 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13739 			else
13740 				hsfsts = ICH8_FLASH_READ16(sc,
13741 				    ICH_FLASH_HSFSTS);
13742 
13743 			if (hsfsts & HSFSTS_ERR) {
13744 				/* Repeat for some time before giving up. */
13745 				continue;
13746 			} else if ((hsfsts & HSFSTS_DONE) == 0)
13747 				break;
13748 		}
13749 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13750 
13751 	return error;
13752 }
13753 
13754 /******************************************************************************
13755  * Reads a single byte from the NVM using the ICH8 flash access registers.
13756  *
13757  * sc - pointer to wm_hw structure
13758  * index - The index of the byte to read.
13759  * data - Pointer to a byte to store the value read.
13760  *****************************************************************************/
13761 static int32_t
13762 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13763 {
13764 	int32_t status;
13765 	uint32_t word = 0;
13766 
13767 	status = wm_read_ich8_data(sc, index, 1, &word);
13768 	if (status == 0)
13769 		*data = (uint8_t)word;
13770 	else
13771 		*data = 0;
13772 
13773 	return status;
13774 }
13775 
13776 /******************************************************************************
13777  * Reads a word from the NVM using the ICH8 flash access registers.
13778  *
13779  * sc - pointer to wm_hw structure
13780  * index - The starting byte index of the word to read.
13781  * data - Pointer to a word to store the value read.
13782  *****************************************************************************/
13783 static int32_t
13784 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13785 {
13786 	int32_t status;
13787 	uint32_t word = 0;
13788 
13789 	status = wm_read_ich8_data(sc, index, 2, &word);
13790 	if (status == 0)
13791 		*data = (uint16_t)word;
13792 	else
13793 		*data = 0;
13794 
13795 	return status;
13796 }
13797 
13798 /******************************************************************************
13799  * Reads a dword from the NVM using the ICH8 flash access registers.
13800  *
13801  * sc - pointer to wm_hw structure
13802  * index - The starting byte index of the word to read.
13803  * data - Pointer to a word to store the value read.
13804  *****************************************************************************/
13805 static int32_t
13806 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13807 {
13808 	int32_t status;
13809 
13810 	status = wm_read_ich8_data(sc, index, 4, data);
13811 	return status;
13812 }
13813 
13814 /******************************************************************************
13815  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13816  * register.
13817  *
13818  * sc - Struct containing variables accessed by shared code
13819  * offset - offset of word in the EEPROM to read
13820  * data - word read from the EEPROM
13821  * words - number of words to read
13822  *****************************************************************************/
13823 static int
13824 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13825 {
13826 	int32_t	 rv = 0;
13827 	uint32_t flash_bank = 0;
13828 	uint32_t act_offset = 0;
13829 	uint32_t bank_offset = 0;
13830 	uint16_t word = 0;
13831 	uint16_t i = 0;
13832 
13833 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13834 		device_xname(sc->sc_dev), __func__));
13835 
13836 	if (sc->nvm.acquire(sc) != 0)
13837 		return -1;
13838 
13839 	/*
13840 	 * We need to know which is the valid flash bank.  In the event
13841 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13842 	 * managing flash_bank. So it cannot be trusted and needs
13843 	 * to be updated with each read.
13844 	 */
13845 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13846 	if (rv) {
13847 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13848 			device_xname(sc->sc_dev)));
13849 		flash_bank = 0;
13850 	}
13851 
13852 	/*
13853 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13854 	 * size
13855 	 */
13856 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13857 
13858 	for (i = 0; i < words; i++) {
13859 		/* The NVM part needs a byte offset, hence * 2 */
13860 		act_offset = bank_offset + ((offset + i) * 2);
13861 		rv = wm_read_ich8_word(sc, act_offset, &word);
13862 		if (rv) {
13863 			aprint_error_dev(sc->sc_dev,
13864 			    "%s: failed to read NVM\n", __func__);
13865 			break;
13866 		}
13867 		data[i] = word;
13868 	}
13869 
13870 	sc->nvm.release(sc);
13871 	return rv;
13872 }
13873 
13874 /******************************************************************************
13875  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13876  * register.
13877  *
13878  * sc - Struct containing variables accessed by shared code
13879  * offset - offset of word in the EEPROM to read
13880  * data - word read from the EEPROM
13881  * words - number of words to read
13882  *****************************************************************************/
13883 static int
13884 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13885 {
13886 	int32_t	 rv = 0;
13887 	uint32_t flash_bank = 0;
13888 	uint32_t act_offset = 0;
13889 	uint32_t bank_offset = 0;
13890 	uint32_t dword = 0;
13891 	uint16_t i = 0;
13892 
13893 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13894 		device_xname(sc->sc_dev), __func__));
13895 
13896 	if (sc->nvm.acquire(sc) != 0)
13897 		return -1;
13898 
13899 	/*
13900 	 * We need to know which is the valid flash bank.  In the event
13901 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13902 	 * managing flash_bank. So it cannot be trusted and needs
13903 	 * to be updated with each read.
13904 	 */
13905 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13906 	if (rv) {
13907 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13908 			device_xname(sc->sc_dev)));
13909 		flash_bank = 0;
13910 	}
13911 
13912 	/*
13913 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13914 	 * size
13915 	 */
13916 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13917 
13918 	for (i = 0; i < words; i++) {
13919 		/* The NVM part needs a byte offset, hence * 2 */
13920 		act_offset = bank_offset + ((offset + i) * 2);
13921 		/* but we must read dword aligned, so mask ... */
13922 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13923 		if (rv) {
13924 			aprint_error_dev(sc->sc_dev,
13925 			    "%s: failed to read NVM\n", __func__);
13926 			break;
13927 		}
13928 		/* ... and pick out low or high word */
13929 		if ((act_offset & 0x2) == 0)
13930 			data[i] = (uint16_t)(dword & 0xFFFF);
13931 		else
13932 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13933 	}
13934 
13935 	sc->nvm.release(sc);
13936 	return rv;
13937 }
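
/*
 * Worked example of the alignment fixup above: with bank_offset 0,
 * word offset 3 yields byte offset (act_offset) 6.  The flash is read
 * as a dword at byte offset 4 (act_offset & ~0x3), and because
 * act_offset & 0x2 is set, the requested word is the high half,
 * dword >> 16.
 */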
13938 
13939 /* iNVM */
13940 
13941 static int
13942 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13943 {
13944 	int32_t	 rv = -1;	/* "not found" unless a record matches */
13945 	uint32_t invm_dword;
13946 	uint16_t i;
13947 	uint8_t record_type, word_address;
13948 
13949 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13950 		device_xname(sc->sc_dev), __func__));
13951 
13952 	for (i = 0; i < INVM_SIZE; i++) {
13953 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13954 		/* Get record type */
13955 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13956 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13957 			break;
13958 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13959 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13960 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13961 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13962 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13963 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13964 			if (word_address == address) {
13965 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13966 				rv = 0;
13967 				break;
13968 			}
13969 		}
13970 	}
13971 
13972 	return rv;
13973 }
13974 
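/*
 * Only a handful of words exist in iNVM, so the wrapper below fakes a
 * conventional NVM: the MAC-address words are fetched directly, a few
 * configuration and LED words fall back to their documented I211 defaults
 * when absent, and everything else reads as NVM_RESERVED_WORD.
 */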
13975 static int
13976 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13977 {
13978 	int rv = 0;
13979 	int i;
13980 
13981 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13982 		device_xname(sc->sc_dev), __func__));
13983 
13984 	if (sc->nvm.acquire(sc) != 0)
13985 		return -1;
13986 
13987 	for (i = 0; i < words; i++) {
13988 		switch (offset + i) {
13989 		case NVM_OFF_MACADDR:
13990 		case NVM_OFF_MACADDR1:
13991 		case NVM_OFF_MACADDR2:
13992 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13993 			if (rv != 0) {
13994 				data[i] = 0xffff;
13995 				rv = -1;
13996 			}
13997 			break;
13998 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
13999 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14000 			if (rv != 0) {
14001 				data[i] = INVM_DEFAULT_AL;
14002 				rv = 0;
14003 			}
14004 			break;
14005 		case NVM_OFF_CFG2:
14006 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14007 			if (rv != 0) {
14008 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
14009 				rv = 0;
14010 			}
14011 			break;
14012 		case NVM_OFF_CFG4:
14013 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14014 			if (rv != 0) {
14015 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
14016 				rv = 0;
14017 			}
14018 			break;
14019 		case NVM_OFF_LED_1_CFG:
14020 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14021 			if (rv != 0) {
14022 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
14023 				rv = 0;
14024 			}
14025 			break;
14026 		case NVM_OFF_LED_0_2_CFG:
14027 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14028 			if (rv != 0) {
14029 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
14030 				rv = 0;
14031 			}
14032 			break;
14033 		case NVM_OFF_ID_LED_SETTINGS:
14034 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
14035 			if (rv != 0) {
14036 				data[i] = ID_LED_RESERVED_FFFF;
14037 				rv = 0;
14038 			}
14039 			break;
14040 		default:
14041 			DPRINTF(sc, WM_DEBUG_NVM,
14042 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
14043 			data[i] = NVM_RESERVED_WORD;
14044 			break;
14045 		}
14046 	}
14047 
14048 	sc->nvm.release(sc);
14049 	return rv;
14050 }
14051 
14052 /* Lock, detecting NVM type, validate checksum, version and read */
14053 /* Lock, detect NVM type, validate the checksum, get the version and read */
14054 static int
14055 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
14056 {
14057 	uint32_t eecd = 0;
14058 
14059 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
14060 	    || sc->sc_type == WM_T_82583) {
14061 		eecd = CSR_READ(sc, WMREG_EECD);
14062 
14063 		/* Isolate bits 15 & 16 */
14064 		eecd = ((eecd >> 15) & 0x03);
14065 
14066 		/* If both bits are set, device is Flash type */
14067 		if (eecd == 0x03)
14068 			return 0;
14069 	}
14070 	return 1;
14071 }
14072 
14073 static int
14074 wm_nvm_flash_presence_i210(struct wm_softc *sc)
14075 {
14076 	uint32_t eec;
14077 
14078 	eec = CSR_READ(sc, WMREG_EEC);
14079 	if ((eec & EEC_FLASH_DETECTED) != 0)
14080 		return 1;
14081 
14082 	return 0;
14083 }
14084 
14085 /*
14086  * wm_nvm_validate_checksum
14087  *
14088  * The checksum is defined as the sum of the first 64 (16 bit) words.
14089  */
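/*
 * That is, an image is valid when
 *
 *	(word[0x00] + word[0x01] + ... + word[0x3f]) & 0xffff == NVM_CHECKSUM
 *
 * where NVM_CHECKSUM is 0xbaba; the vendor picks the checksum word
 * (conventionally the last of the 64) so that the sum comes out right.
 */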
14090 static int
14091 wm_nvm_validate_checksum(struct wm_softc *sc)
14092 {
14093 	uint16_t checksum;
14094 	uint16_t eeprom_data;
14095 #ifdef WM_DEBUG
14096 	uint16_t csum_wordaddr, valid_checksum;
14097 #endif
14098 	int i;
14099 
14100 	checksum = 0;
14101 
14102 	/* Don't check for I211 */
14103 	if (sc->sc_type == WM_T_I211)
14104 		return 0;
14105 
14106 #ifdef WM_DEBUG
14107 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
14108 	    || (sc->sc_type == WM_T_PCH_CNP)) {
14109 		csum_wordaddr = NVM_OFF_COMPAT;
14110 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
14111 	} else {
14112 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
14113 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
14114 	}
14115 
14116 	/* Dump EEPROM image for debug */
14117 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14118 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14119 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
14120 		/* XXX PCH_SPT? */
14121 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
14122 		if ((eeprom_data & valid_checksum) == 0)
14123 			DPRINTF(sc, WM_DEBUG_NVM,
14124 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
14125 				device_xname(sc->sc_dev), eeprom_data,
14126 				    valid_checksum));
14127 	}
14128 
14129 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
14130 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
14131 		for (i = 0; i < NVM_SIZE; i++) {
14132 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
14133 				printf("XXXX ");
14134 			else
14135 				printf("%04hx ", eeprom_data);
14136 			if (i % 8 == 7)
14137 				printf("\n");
14138 		}
14139 	}
14140 
14141 #endif /* WM_DEBUG */
14142 
14143 	for (i = 0; i < NVM_SIZE; i++) {
14144 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
14145 			return 1;
14146 		checksum += eeprom_data;
14147 	}
14148 
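	/* A mismatch is only reported under WM_DEBUG; the image is accepted. */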
14149 	if (checksum != (uint16_t) NVM_CHECKSUM) {
14150 #ifdef WM_DEBUG
14151 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14152 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14153 #endif
14154 	}
14155 
14156 	return 0;
14157 }
14158 
14159 static void
14160 wm_nvm_version_invm(struct wm_softc *sc)
14161 {
14162 	uint32_t dword;
14163 
14164 	/*
14165 	 * Linux's code to decode the version is very strange, so we don't
14166 	 * follow that algorithm; we simply decode word 61 as the datasheet
14167 	 * describes.  It may not be perfect, though...
14168 	 *
14169 	 * Example:
14170 	 *
14171 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14172 	 */
14173 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14174 	dword = __SHIFTOUT(dword, INVM_VER_1);
14175 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14176 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14177 }
14178 
14179 static void
14180 wm_nvm_version(struct wm_softc *sc)
14181 {
14182 	uint16_t major, minor, build, patch;
14183 	uint16_t uid0, uid1;
14184 	uint16_t nvm_data;
14185 	uint16_t off;
14186 	bool check_version = false;
14187 	bool check_optionrom = false;
14188 	bool have_build = false;
14189 	bool have_uid = true;
14190 
14191 	/*
14192 	 * Version format:
14193 	 *
14194 	 * XYYZ
14195 	 * X0YZ
14196 	 * X0YY
14197 	 *
14198 	 * Example:
14199 	 *
14200 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
14201 	 *	82571	0x50a6	5.10.6?
14202 	 *	82572	0x506a	5.6.10?
14203 	 *	82572EI	0x5069	5.6.9?
14204 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
14205 	 *		0x2013	2.1.3?
14206 	 *	82583	0x10a0	1.10.0? (document says it's default value)
14207 	 * ICH8+82567	0x0040	0.4.0?
14208 	 * ICH9+82566	0x1040	1.4.0?
14209 	 *ICH10+82567	0x0043	0.4.3?
14210 	 *  PCH+82577	0x00c1	0.12.1?
14211 	 * PCH2+82579	0x00d3	0.13.3?
14212 	 *		0x00d4	0.13.4?
14213 	 *  LPT+I218	0x0023	0.2.3?
14214 	 *  SPT+I219	0x0084	0.8.4?
14215 	 *  CNP+I219	0x0054	0.5.4?
14216 	 */
14217 
14218 	/*
14219 	 * XXX
14220 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
14221 	 * I've never seen real 82574 hardware with such a small SPI ROM.
14222 	 */
14223 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14224 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14225 		have_uid = false;
14226 
14227 	switch (sc->sc_type) {
14228 	case WM_T_82571:
14229 	case WM_T_82572:
14230 	case WM_T_82574:
14231 	case WM_T_82583:
14232 		check_version = true;
14233 		check_optionrom = true;
14234 		have_build = true;
14235 		break;
14236 	case WM_T_ICH8:
14237 	case WM_T_ICH9:
14238 	case WM_T_ICH10:
14239 	case WM_T_PCH:
14240 	case WM_T_PCH2:
14241 	case WM_T_PCH_LPT:
14242 	case WM_T_PCH_SPT:
14243 	case WM_T_PCH_CNP:
14244 		check_version = true;
14245 		have_build = true;
14246 		have_uid = false;
14247 		break;
14248 	case WM_T_82575:
14249 	case WM_T_82576:
14250 	case WM_T_82580:
14251 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14252 			check_version = true;
14253 		break;
14254 	case WM_T_I211:
14255 		wm_nvm_version_invm(sc);
14256 		have_uid = false;
14257 		goto printver;
14258 	case WM_T_I210:
14259 		if (!wm_nvm_flash_presence_i210(sc)) {
14260 			wm_nvm_version_invm(sc);
14261 			have_uid = false;
14262 			goto printver;
14263 		}
14264 		/* FALLTHROUGH */
14265 	case WM_T_I350:
14266 	case WM_T_I354:
14267 		check_version = true;
14268 		check_optionrom = true;
14269 		break;
14270 	default:
14271 		return;
14272 	}
14273 	if (check_version
14274 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14275 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14276 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14277 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14278 			build = nvm_data & NVM_BUILD_MASK;
14279 			have_build = true;
14280 		} else
14281 			minor = nvm_data & 0x00ff;
14282 
14283 		/* BCD to decimal: each hex digit is a decimal digit, e.g. 0x0a -> 10 */
14284 		minor = (minor / 16) * 10 + (minor % 16);
14285 		sc->sc_nvm_ver_major = major;
14286 		sc->sc_nvm_ver_minor = minor;
14287 
14288 printver:
14289 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14290 		    sc->sc_nvm_ver_minor);
14291 		if (have_build) {
14292 			sc->sc_nvm_ver_build = build;
14293 			aprint_verbose(".%d", build);
14294 		}
14295 	}
14296 
14297 	/* Assume the Option ROM area is above NVM_SIZE */
14298 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14299 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14300 		/* Option ROM Version */
14301 		if ((off != 0x0000) && (off != 0xffff)) {
14302 			int rv;
14303 
14304 			off += NVM_COMBO_VER_OFF;
14305 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14306 			rv |= wm_nvm_read(sc, off, 1, &uid0);
14307 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14308 			    && (uid1 != 0) && (uid1 != 0xffff)) {
14309 				/* 16bits */
14310 				major = uid0 >> 8;
14311 				build = (uid0 << 8) | (uid1 >> 8);
14312 				patch = uid1 & 0x00ff;
14313 				aprint_verbose(", option ROM Version %d.%d.%d",
14314 				    major, build, patch);
14315 			}
14316 		}
14317 	}
14318 
14319 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14320 		aprint_verbose(", Image Unique ID %08x",
14321 		    ((uint32_t)uid1 << 16) | uid0);
14322 }
14323 
14324 /*
14325  * wm_nvm_read:
14326  *
14327  *	Read data from the serial EEPROM.
14328  */
14329 static int
14330 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14331 {
14332 	int rv;
14333 
14334 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14335 		device_xname(sc->sc_dev), __func__));
14336 
14337 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
14338 		return -1;
14339 
14340 	rv = sc->nvm.read(sc, word, wordcnt, data);
14341 
14342 	return rv;
14343 }
14344 
14345 /*
14346  * Hardware semaphores.
14347  * Very complex...
14348  */
14349 
14350 static int
14351 wm_get_null(struct wm_softc *sc)
14352 {
14353 
14354 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14355 		device_xname(sc->sc_dev), __func__));
14356 	return 0;
14357 }
14358 
14359 static void
14360 wm_put_null(struct wm_softc *sc)
14361 {
14362 
14363 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14364 		device_xname(sc->sc_dev), __func__));
14365 	return;
14366 }
14367 
14368 static int
14369 wm_get_eecd(struct wm_softc *sc)
14370 {
14371 	uint32_t reg;
14372 	int x;
14373 
14374 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14375 		device_xname(sc->sc_dev), __func__));
14376 
14377 	reg = CSR_READ(sc, WMREG_EECD);
14378 
14379 	/* Request EEPROM access. */
14380 	reg |= EECD_EE_REQ;
14381 	CSR_WRITE(sc, WMREG_EECD, reg);
14382 
14383 	/* ... and wait for it to be granted. */
14384 	for (x = 0; x < 1000; x++) {
14385 		reg = CSR_READ(sc, WMREG_EECD);
14386 		if (reg & EECD_EE_GNT)
14387 			break;
14388 		delay(5);
14389 	}
14390 	if ((reg & EECD_EE_GNT) == 0) {
14391 		aprint_error_dev(sc->sc_dev,
14392 		    "could not acquire EEPROM GNT\n");
14393 		reg &= ~EECD_EE_REQ;
14394 		CSR_WRITE(sc, WMREG_EECD, reg);
14395 		return -1;
14396 	}
14397 
14398 	return 0;
14399 }
14400 
14401 static void
14402 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14403 {
14404 
14405 	*eecd |= EECD_SK;
14406 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14407 	CSR_WRITE_FLUSH(sc);
14408 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14409 		delay(1);
14410 	else
14411 		delay(50);
14412 }
14413 
14414 static void
14415 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14416 {
14417 
14418 	*eecd &= ~EECD_SK;
14419 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14420 	CSR_WRITE_FLUSH(sc);
14421 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14422 		delay(1);
14423 	else
14424 		delay(50);
14425 }
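/*
 * The raise/lower pair above bit-bangs the EEPROM serial clock (SK).
 * SPI parts tolerate a 1us half-period while Microwire parts need a much
 * slower clock, hence the 1us vs. 50us delays.
 */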
14426 
14427 static void
14428 wm_put_eecd(struct wm_softc *sc)
14429 {
14430 	uint32_t reg;
14431 
14432 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14433 		device_xname(sc->sc_dev), __func__));
14434 
14435 	/* Stop nvm */
14436 	reg = CSR_READ(sc, WMREG_EECD);
14437 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14438 		/* Pull CS high */
14439 		reg |= EECD_CS;
14440 		wm_nvm_eec_clock_lower(sc, &reg);
14441 	} else {
14442 		/* CS on Microwire is active-high */
14443 		reg &= ~(EECD_CS | EECD_DI);
14444 		CSR_WRITE(sc, WMREG_EECD, reg);
14445 		wm_nvm_eec_clock_raise(sc, &reg);
14446 		wm_nvm_eec_clock_lower(sc, &reg);
14447 	}
14448 
14449 	reg = CSR_READ(sc, WMREG_EECD);
14450 	reg &= ~EECD_EE_REQ;
14451 	CSR_WRITE(sc, WMREG_EECD, reg);
14452 
14453 	return;
14454 }
14455 
14456 /*
14457  * Get hardware semaphore.
14458  * Same as e1000_get_hw_semaphore_generic()
14459  */
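/*
 * The SWSM semaphore is two-layered: SMBI arbitrates among software
 * agents and SWESMBI between software and firmware; both are cleared by
 * wm_put_swsm_semaphore().  SWESMBI is claimed with a write-then-read-back
 * test: if the bit reads back set, our write won and we own the semaphore.
 */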
14460 static int
14461 wm_get_swsm_semaphore(struct wm_softc *sc)
14462 {
14463 	int32_t timeout;
14464 	uint32_t swsm;
14465 
14466 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14467 		device_xname(sc->sc_dev), __func__));
14468 	KASSERT(sc->sc_nvm_wordsize > 0);
14469 
14470 retry:
14471 	/* Get the SW semaphore. */
14472 	timeout = sc->sc_nvm_wordsize + 1;
14473 	while (timeout) {
14474 		swsm = CSR_READ(sc, WMREG_SWSM);
14475 
14476 		if ((swsm & SWSM_SMBI) == 0)
14477 			break;
14478 
14479 		delay(50);
14480 		timeout--;
14481 	}
14482 
14483 	if (timeout == 0) {
14484 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14485 			/*
14486 			 * In rare circumstances, the SW semaphore may already
14487 			 * be held unintentionally. Clear the semaphore once
14488 			 * before giving up.
14489 			 */
14490 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14491 			wm_put_swsm_semaphore(sc);
14492 			goto retry;
14493 		}
14494 		aprint_error_dev(sc->sc_dev,
14495 		    "could not acquire SWSM SMBI\n");
14496 		return 1;
14497 	}
14498 
14499 	/* Get the FW semaphore. */
14500 	timeout = sc->sc_nvm_wordsize + 1;
14501 	while (timeout) {
14502 		swsm = CSR_READ(sc, WMREG_SWSM);
14503 		swsm |= SWSM_SWESMBI;
14504 		CSR_WRITE(sc, WMREG_SWSM, swsm);
14505 		/* If we managed to set the bit we got the semaphore. */
14506 		swsm = CSR_READ(sc, WMREG_SWSM);
14507 		if (swsm & SWSM_SWESMBI)
14508 			break;
14509 
14510 		delay(50);
14511 		timeout--;
14512 	}
14513 
14514 	if (timeout == 0) {
14515 		aprint_error_dev(sc->sc_dev,
14516 		    "could not acquire SWSM SWESMBI\n");
14517 		/* Release semaphores */
14518 		wm_put_swsm_semaphore(sc);
14519 		return 1;
14520 	}
14521 	return 0;
14522 }
14523 
14524 /*
14525  * Put hardware semaphore.
14526  * Same as e1000_put_hw_semaphore_generic()
14527  */
14528 static void
14529 wm_put_swsm_semaphore(struct wm_softc *sc)
14530 {
14531 	uint32_t swsm;
14532 
14533 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14534 		device_xname(sc->sc_dev), __func__));
14535 
14536 	swsm = CSR_READ(sc, WMREG_SWSM);
14537 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14538 	CSR_WRITE(sc, WMREG_SWSM, swsm);
14539 }
14540 
14541 /*
14542  * Get SW/FW semaphore.
14543  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14544  */
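/*
 * Each SW_FW_SYNC resource has a software bit and a firmware bit: for
 * mask SWFW_EEP_SM, for example, (mask << SWFW_SOFT_SHIFT) is the SW
 * claim and (mask << SWFW_FIRM_SHIFT) the FW claim, and acquisition
 * succeeds only while neither is set.  The SWSM semaphore above guards
 * the read-modify-write of SW_FW_SYNC itself.
 */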
14545 static int
14546 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14547 {
14548 	uint32_t swfw_sync;
14549 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14550 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14551 	int timeout;
14552 
14553 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14554 		device_xname(sc->sc_dev), __func__));
14555 
14556 	if (sc->sc_type == WM_T_80003)
14557 		timeout = 50;
14558 	else
14559 		timeout = 200;
14560 
14561 	while (timeout) {
14562 		if (wm_get_swsm_semaphore(sc)) {
14563 			aprint_error_dev(sc->sc_dev,
14564 			    "%s: failed to get semaphore\n",
14565 			    __func__);
14566 			return 1;
14567 		}
14568 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14569 		if ((swfw_sync & (swmask | fwmask)) == 0) {
14570 			swfw_sync |= swmask;
14571 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14572 			wm_put_swsm_semaphore(sc);
14573 			return 0;
14574 		}
14575 		wm_put_swsm_semaphore(sc);
14576 		delay(5000);
14577 		timeout--;
14578 	}
14579 	device_printf(sc->sc_dev,
14580 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14581 	    mask, swfw_sync);
14582 	return 1;
14583 }
14584 
14585 static void
14586 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14587 {
14588 	uint32_t swfw_sync;
14589 
14590 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14591 		device_xname(sc->sc_dev), __func__));
14592 
14593 	while (wm_get_swsm_semaphore(sc) != 0)
14594 		continue;
14595 
14596 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14597 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14598 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14599 
14600 	wm_put_swsm_semaphore(sc);
14601 }
14602 
14603 static int
14604 wm_get_nvm_80003(struct wm_softc *sc)
14605 {
14606 	int rv;
14607 
14608 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14609 		device_xname(sc->sc_dev), __func__));
14610 
14611 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14612 		aprint_error_dev(sc->sc_dev,
14613 		    "%s: failed to get semaphore(SWFW)\n", __func__);
14614 		return rv;
14615 	}
14616 
14617 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14618 	    && (rv = wm_get_eecd(sc)) != 0) {
14619 		aprint_error_dev(sc->sc_dev,
14620 		    "%s: failed to get semaphore(EECD)\n", __func__);
14621 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14622 		return rv;
14623 	}
14624 
14625 	return 0;
14626 }
14627 
14628 static void
14629 wm_put_nvm_80003(struct wm_softc *sc)
14630 {
14631 
14632 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14633 		device_xname(sc->sc_dev), __func__));
14634 
14635 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14636 		wm_put_eecd(sc);
14637 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14638 }
14639 
14640 static int
14641 wm_get_nvm_82571(struct wm_softc *sc)
14642 {
14643 	int rv;
14644 
14645 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14646 		device_xname(sc->sc_dev), __func__));
14647 
14648 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14649 		return rv;
14650 
14651 	switch (sc->sc_type) {
14652 	case WM_T_82573:
14653 		break;
14654 	default:
14655 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14656 			rv = wm_get_eecd(sc);
14657 		break;
14658 	}
14659 
14660 	if (rv != 0) {
14661 		aprint_error_dev(sc->sc_dev,
14662 		    "%s: failed to get semaphore\n",
14663 		    __func__);
14664 		wm_put_swsm_semaphore(sc);
14665 	}
14666 
14667 	return rv;
14668 }
14669 
14670 static void
14671 wm_put_nvm_82571(struct wm_softc *sc)
14672 {
14673 
14674 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14675 		device_xname(sc->sc_dev), __func__));
14676 
14677 	switch (sc->sc_type) {
14678 	case WM_T_82573:
14679 		break;
14680 	default:
14681 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14682 			wm_put_eecd(sc);
14683 		break;
14684 	}
14685 
14686 	wm_put_swsm_semaphore(sc);
14687 }
14688 
14689 static int
14690 wm_get_phy_82575(struct wm_softc *sc)
14691 {
14692 
14693 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14694 		device_xname(sc->sc_dev), __func__));
14695 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14696 }
14697 
14698 static void
14699 wm_put_phy_82575(struct wm_softc *sc)
14700 {
14701 
14702 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14703 		device_xname(sc->sc_dev), __func__));
14704 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14705 }
14706 
14707 static int
14708 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14709 {
14710 	uint32_t ext_ctrl;
14711 	int timeout = 200;
14712 
14713 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14714 		device_xname(sc->sc_dev), __func__));
14715 
14716 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14717 	for (timeout = 0; timeout < 200; timeout++) {
14718 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14719 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14720 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14721 
14722 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14723 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14724 			return 0;
14725 		delay(5000);
14726 	}
14727 	device_printf(sc->sc_dev,
14728 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14729 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14730 	return 1;
14731 }
14732 
14733 static void
14734 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14735 {
14736 	uint32_t ext_ctrl;
14737 
14738 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14739 		device_xname(sc->sc_dev), __func__));
14740 
14741 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14742 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14743 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14744 
14745 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14746 }
14747 
14748 static int
14749 wm_get_swflag_ich8lan(struct wm_softc *sc)
14750 {
14751 	uint32_t ext_ctrl;
14752 	int timeout;
14753 
14754 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14755 		device_xname(sc->sc_dev), __func__));
14756 	mutex_enter(sc->sc_ich_phymtx);
14757 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14758 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14759 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14760 			break;
14761 		delay(1000);
14762 	}
14763 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
14764 		device_printf(sc->sc_dev,
14765 		    "SW has already locked the resource\n");
14766 		goto out;
14767 	}
14768 
14769 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14770 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14771 	for (timeout = 0; timeout < 1000; timeout++) {
14772 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14773 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14774 			break;
14775 		delay(1000);
14776 	}
14777 	if (timeout >= 1000) {
14778 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14779 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14780 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14781 		goto out;
14782 	}
14783 	return 0;
14784 
14785 out:
14786 	mutex_exit(sc->sc_ich_phymtx);
14787 	return 1;
14788 }
14789 
14790 static void
14791 wm_put_swflag_ich8lan(struct wm_softc *sc)
14792 {
14793 	uint32_t ext_ctrl;
14794 
14795 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14796 		device_xname(sc->sc_dev), __func__));
14797 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14798 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14799 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14800 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14801 	} else {
14802 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14803 	}
14804 
14805 	mutex_exit(sc->sc_ich_phymtx);
14806 }
14807 
14808 static int
14809 wm_get_nvm_ich8lan(struct wm_softc *sc)
14810 {
14811 
14812 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14813 		device_xname(sc->sc_dev), __func__));
14814 	mutex_enter(sc->sc_ich_nvmmtx);
14815 
14816 	return 0;
14817 }
14818 
14819 static void
14820 wm_put_nvm_ich8lan(struct wm_softc *sc)
14821 {
14822 
14823 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14824 		device_xname(sc->sc_dev), __func__));
14825 	mutex_exit(sc->sc_ich_nvmmtx);
14826 }
14827 
14828 static int
14829 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14830 {
14831 	int i = 0;
14832 	uint32_t reg;
14833 
14834 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14835 		device_xname(sc->sc_dev), __func__));
14836 
14837 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14838 	do {
14839 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
14840 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14841 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14842 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14843 			break;
14844 		delay(2*1000);
14845 		i++;
14846 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14847 
14848 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14849 		wm_put_hw_semaphore_82573(sc);
14850 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
14851 		    device_xname(sc->sc_dev));
14852 		return -1;
14853 	}
14854 
14855 	return 0;
14856 }
14857 
14858 static void
14859 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14860 {
14861 	uint32_t reg;
14862 
14863 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14864 		device_xname(sc->sc_dev), __func__));
14865 
14866 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14867 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14868 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14869 }
14870 
14871 /*
14872  * Management mode and power management related subroutines.
14873  * BMC, AMT, suspend/resume and EEE.
14874  */
14875 
14876 #ifdef WM_WOL
14877 static int
14878 wm_check_mng_mode(struct wm_softc *sc)
14879 {
14880 	int rv;
14881 
14882 	switch (sc->sc_type) {
14883 	case WM_T_ICH8:
14884 	case WM_T_ICH9:
14885 	case WM_T_ICH10:
14886 	case WM_T_PCH:
14887 	case WM_T_PCH2:
14888 	case WM_T_PCH_LPT:
14889 	case WM_T_PCH_SPT:
14890 	case WM_T_PCH_CNP:
14891 		rv = wm_check_mng_mode_ich8lan(sc);
14892 		break;
14893 	case WM_T_82574:
14894 	case WM_T_82583:
14895 		rv = wm_check_mng_mode_82574(sc);
14896 		break;
14897 	case WM_T_82571:
14898 	case WM_T_82572:
14899 	case WM_T_82573:
14900 	case WM_T_80003:
14901 		rv = wm_check_mng_mode_generic(sc);
14902 		break;
14903 	default:
14904 		/* Nothing to do */
14905 		rv = 0;
14906 		break;
14907 	}
14908 
14909 	return rv;
14910 }
14911 
14912 static int
14913 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14914 {
14915 	uint32_t fwsm;
14916 
14917 	fwsm = CSR_READ(sc, WMREG_FWSM);
14918 
14919 	if (((fwsm & FWSM_FW_VALID) != 0)
14920 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14921 		return 1;
14922 
14923 	return 0;
14924 }
14925 
14926 static int
14927 wm_check_mng_mode_82574(struct wm_softc *sc)
14928 {
14929 	uint16_t data;
14930 
14931 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14932 
14933 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
14934 		return 1;
14935 
14936 	return 0;
14937 }
14938 
14939 static int
14940 wm_check_mng_mode_generic(struct wm_softc *sc)
14941 {
14942 	uint32_t fwsm;
14943 
14944 	fwsm = CSR_READ(sc, WMREG_FWSM);
14945 
14946 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14947 		return 1;
14948 
14949 	return 0;
14950 }
14951 #endif /* WM_WOL */
14952 
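/*
 * Pass-through is reported only when manageability firmware is present
 * and TCO receive is enabled, and then per family: via the ARC subsystem
 * mode, via the 82574/82583 NVM MNGM field, or, failing those, when SMBus
 * is enabled without ASF.
 */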
14953 static int
14954 wm_enable_mng_pass_thru(struct wm_softc *sc)
14955 {
14956 	uint32_t manc, fwsm, factps;
14957 
14958 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14959 		return 0;
14960 
14961 	manc = CSR_READ(sc, WMREG_MANC);
14962 
14963 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14964 		device_xname(sc->sc_dev), manc));
14965 	if ((manc & MANC_RECV_TCO_EN) == 0)
14966 		return 0;
14967 
14968 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14969 		fwsm = CSR_READ(sc, WMREG_FWSM);
14970 		factps = CSR_READ(sc, WMREG_FACTPS);
14971 		if (((factps & FACTPS_MNGCG) == 0)
14972 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14973 			return 1;
14974 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14975 		uint16_t data;
14976 
14977 		factps = CSR_READ(sc, WMREG_FACTPS);
14978 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14979 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14980 			device_xname(sc->sc_dev), factps, data));
14981 		if (((factps & FACTPS_MNGCG) == 0)
14982 		    && ((data & NVM_CFG2_MNGM_MASK)
14983 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14984 			return 1;
14985 	} else if (((manc & MANC_SMBUS_EN) != 0)
14986 	    && ((manc & MANC_ASF_EN) == 0))
14987 		return 1;
14988 
14989 	return 0;
14990 }
14991 
14992 static bool
14993 wm_phy_resetisblocked(struct wm_softc *sc)
14994 {
14995 	bool blocked = false;
14996 	uint32_t reg;
14997 	int i = 0;
14998 
14999 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15000 		device_xname(sc->sc_dev), __func__));
15001 
15002 	switch (sc->sc_type) {
15003 	case WM_T_ICH8:
15004 	case WM_T_ICH9:
15005 	case WM_T_ICH10:
15006 	case WM_T_PCH:
15007 	case WM_T_PCH2:
15008 	case WM_T_PCH_LPT:
15009 	case WM_T_PCH_SPT:
15010 	case WM_T_PCH_CNP:
15011 		do {
15012 			reg = CSR_READ(sc, WMREG_FWSM);
15013 			if ((reg & FWSM_RSPCIPHY) == 0) {
15014 				blocked = true;
15015 				delay(10*1000);
15016 				continue;
15017 			}
15018 			blocked = false;
15019 		} while (blocked && (i++ < 30));
15020 		return blocked;
15022 	case WM_T_82571:
15023 	case WM_T_82572:
15024 	case WM_T_82573:
15025 	case WM_T_82574:
15026 	case WM_T_82583:
15027 	case WM_T_80003:
15028 		reg = CSR_READ(sc, WMREG_MANC);
15029 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
15030 			return true;
15031 		else
15032 			return false;
15034 	default:
15035 		/* No problem */
15036 		break;
15037 	}
15038 
15039 	return false;
15040 }
15041 
15042 static void
15043 wm_get_hw_control(struct wm_softc *sc)
15044 {
15045 	uint32_t reg;
15046 
15047 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15048 		device_xname(sc->sc_dev), __func__));
15049 
15050 	if (sc->sc_type == WM_T_82573) {
15051 		reg = CSR_READ(sc, WMREG_SWSM);
15052 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
15053 	} else if (sc->sc_type >= WM_T_82571) {
15054 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15055 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
15056 	}
15057 }
15058 
15059 static void
15060 wm_release_hw_control(struct wm_softc *sc)
15061 {
15062 	uint32_t reg;
15063 
15064 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
15065 		device_xname(sc->sc_dev), __func__));
15066 
15067 	if (sc->sc_type == WM_T_82573) {
15068 		reg = CSR_READ(sc, WMREG_SWSM);
15069 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
15070 	} else if (sc->sc_type >= WM_T_82571) {
15071 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15072 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
15073 	}
15074 }
15075 
15076 static void
15077 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
15078 {
15079 	uint32_t reg;
15080 
15081 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15082 		device_xname(sc->sc_dev), __func__));
15083 
15084 	if (sc->sc_type < WM_T_PCH2)
15085 		return;
15086 
15087 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
15088 
15089 	if (gate)
15090 		reg |= EXTCNFCTR_GATE_PHY_CFG;
15091 	else
15092 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
15093 
15094 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
15095 }
15096 
15097 static int
15098 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
15099 {
15100 	uint32_t fwsm, reg;
15101 	int rv = 0;
15102 
15103 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15104 		device_xname(sc->sc_dev), __func__));
15105 
15106 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
15107 	wm_gate_hw_phy_config_ich8lan(sc, true);
15108 
15109 	/* Disable ULP */
15110 	wm_ulp_disable(sc);
15111 
15112 	/* Acquire PHY semaphore */
15113 	rv = sc->phy.acquire(sc);
15114 	if (rv != 0) {
15115 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15116 		device_xname(sc->sc_dev), __func__));
15117 		return -1;
15118 	}
15119 
15120 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
15121 	 * inaccessible and resetting the PHY is not blocked, toggle the
15122 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
15123 	 */
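	/*
	 * Recovery ladder: probe the PHY as-is; on LPT and later, if that
	 * fails, force the MAC to SMBus mode and probe again; as a last
	 * resort (and only if a PHY reset isn't blocked), toggle LANPHYPC
	 * to power-cycle the interconnect, take the MAC back out of SMBus
	 * mode and probe once more.
	 */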
15124 	fwsm = CSR_READ(sc, WMREG_FWSM);
15125 	switch (sc->sc_type) {
15126 	case WM_T_PCH_LPT:
15127 	case WM_T_PCH_SPT:
15128 	case WM_T_PCH_CNP:
15129 		if (wm_phy_is_accessible_pchlan(sc))
15130 			break;
15131 
15132 		/* Before toggling LANPHYPC, see if PHY is accessible by
15133 		 * forcing MAC to SMBus mode first.
15134 		 */
15135 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15136 		reg |= CTRL_EXT_FORCE_SMBUS;
15137 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15138 #if 0
15139 		/* XXX Isn't this required??? */
15140 		CSR_WRITE_FLUSH(sc);
15141 #endif
15142 		/* Wait 50 milliseconds for MAC to finish any retries
15143 		 * that it might be trying to perform from previous
15144 		 * attempts to acknowledge any phy read requests.
15145 		 */
15146 		delay(50 * 1000);
15147 		/* FALLTHROUGH */
15148 	case WM_T_PCH2:
15149 		if (wm_phy_is_accessible_pchlan(sc) == true)
15150 			break;
15151 		/* FALLTHROUGH */
15152 	case WM_T_PCH:
15153 		if (sc->sc_type == WM_T_PCH)
15154 			if ((fwsm & FWSM_FW_VALID) != 0)
15155 				break;
15156 
15157 		if (wm_phy_resetisblocked(sc) == true) {
15158 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15159 			break;
15160 		}
15161 
15162 		/* Toggle LANPHYPC Value bit */
15163 		wm_toggle_lanphypc_pch_lpt(sc);
15164 
15165 		if (sc->sc_type >= WM_T_PCH_LPT) {
15166 			if (wm_phy_is_accessible_pchlan(sc) == true)
15167 				break;
15168 
15169 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
15170 			 * so ensure that the MAC is also out of SMBus mode
15171 			 */
15172 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
15173 			reg &= ~CTRL_EXT_FORCE_SMBUS;
15174 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15175 
15176 			if (wm_phy_is_accessible_pchlan(sc) == true)
15177 				break;
15178 			rv = -1;
15179 		}
15180 		break;
15181 	default:
15182 		break;
15183 	}
15184 
15185 	/* Release semaphore */
15186 	sc->phy.release(sc);
15187 
15188 	if (rv == 0) {
15189 		/* Check to see if able to reset PHY.  Print error if not */
15190 		if (wm_phy_resetisblocked(sc)) {
15191 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15192 			goto out;
15193 		}
15194 
15195 		/* Reset the PHY before any access to it.  Doing so, ensures
15196 		 * that the PHY is in a known good state before we read/write
15197 		 * PHY registers.  The generic reset is sufficient here,
15198 		 * because we haven't determined the PHY type yet.
15199 		 */
15200 		if (wm_reset_phy(sc) != 0)
15201 			goto out;
15202 
15203 		/* On a successful reset, possibly need to wait for the PHY
15204 		 * to quiesce to an accessible state before returning control
15205 		 * to the calling function.  If the PHY does not quiesce, we
15206 		 * only print a diagnostic; the original Intel code returns
15207 		 * E1000E_BLK_PHY_RESET, as that is the condition the PHY is in.
15208 		 */
15209 		if (wm_phy_resetisblocked(sc))
15210 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15211 	}
15212 
15213 out:
15214 	/* Ungate automatic PHY configuration on non-managed 82579 */
15215 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15216 		delay(10*1000);
15217 		wm_gate_hw_phy_config_ich8lan(sc, false);
15218 	}
15219 
15220 	return rv;
15221 }
15222 
15223 static void
15224 wm_init_manageability(struct wm_softc *sc)
15225 {
15226 
15227 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15228 		device_xname(sc->sc_dev), __func__));
15229 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
15230 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15231 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
15232 
15233 		/* Disable hardware interception of ARP */
15234 		manc &= ~MANC_ARP_EN;
15235 
15236 		/* Enable receiving management packets to the host */
15237 		if (sc->sc_type >= WM_T_82571) {
15238 			manc |= MANC_EN_MNG2HOST;
15239 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15240 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15241 		}
15242 
15243 		CSR_WRITE(sc, WMREG_MANC, manc);
15244 	}
15245 }
15246 
15247 static void
15248 wm_release_manageability(struct wm_softc *sc)
15249 {
15250 
15251 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
15252 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
15253 
15254 		manc |= MANC_ARP_EN;
15255 		if (sc->sc_type >= WM_T_82571)
15256 			manc &= ~MANC_EN_MNG2HOST;
15257 
15258 		CSR_WRITE(sc, WMREG_MANC, manc);
15259 	}
15260 }
15261 
15262 static void
15263 wm_get_wakeup(struct wm_softc *sc)
15264 {
15265 
15266 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15267 	switch (sc->sc_type) {
15268 	case WM_T_82573:
15269 	case WM_T_82583:
15270 		sc->sc_flags |= WM_F_HAS_AMT;
15271 		/* FALLTHROUGH */
15272 	case WM_T_80003:
15273 	case WM_T_82575:
15274 	case WM_T_82576:
15275 	case WM_T_82580:
15276 	case WM_T_I350:
15277 	case WM_T_I354:
15278 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15279 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15280 		/* FALLTHROUGH */
15281 	case WM_T_82541:
15282 	case WM_T_82541_2:
15283 	case WM_T_82547:
15284 	case WM_T_82547_2:
15285 	case WM_T_82571:
15286 	case WM_T_82572:
15287 	case WM_T_82574:
15288 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15289 		break;
15290 	case WM_T_ICH8:
15291 	case WM_T_ICH9:
15292 	case WM_T_ICH10:
15293 	case WM_T_PCH:
15294 	case WM_T_PCH2:
15295 	case WM_T_PCH_LPT:
15296 	case WM_T_PCH_SPT:
15297 	case WM_T_PCH_CNP:
15298 		sc->sc_flags |= WM_F_HAS_AMT;
15299 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15300 		break;
15301 	default:
15302 		break;
15303 	}
15304 
15305 	/* 1: HAS_MANAGE */
15306 	if (wm_enable_mng_pass_thru(sc) != 0)
15307 		sc->sc_flags |= WM_F_HAS_MANAGE;
15308 
15309 	/*
15310 	 * Note that the WOL flags are set after the EEPROM settings have
15311 	 * been reset.
15312 	 */
15313 }
15314 
15315 /*
15316  * Unconfigure Ultra Low Power mode.
15317  * Only for I217 and newer (see below).
15318  */
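/*
 * Two exit paths: if manageability firmware is alive it is asked to undo
 * ULP through H2ME and FWSM is polled until ULP_CFG_DONE clears; otherwise
 * the host toggles LANPHYPC itself, unforces SMBus mode in both the PHY
 * and the MAC, and restarts the ULP state machine via I218_ULP_CONFIG1.
 */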
15319 static int
15320 wm_ulp_disable(struct wm_softc *sc)
15321 {
15322 	uint32_t reg;
15323 	uint16_t phyreg;
15324 	int i = 0, rv = 0;
15325 
15326 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15327 		device_xname(sc->sc_dev), __func__));
15328 	/* Exclude old devices */
15329 	if ((sc->sc_type < WM_T_PCH_LPT)
15330 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15331 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15332 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15333 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15334 		return 0;
15335 
15336 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15337 		/* Request ME un-configure ULP mode in the PHY */
15338 		reg = CSR_READ(sc, WMREG_H2ME);
15339 		reg &= ~H2ME_ULP;
15340 		reg |= H2ME_ENFORCE_SETTINGS;
15341 		CSR_WRITE(sc, WMREG_H2ME, reg);
15342 
15343 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15344 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15345 			if (i++ == 30) {
15346 				device_printf(sc->sc_dev, "%s timed out\n",
15347 				    __func__);
15348 				return -1;
15349 			}
15350 			delay(10 * 1000);
15351 		}
15352 		reg = CSR_READ(sc, WMREG_H2ME);
15353 		reg &= ~H2ME_ENFORCE_SETTINGS;
15354 		CSR_WRITE(sc, WMREG_H2ME, reg);
15355 
15356 		return 0;
15357 	}
15358 
15359 	/* Acquire semaphore */
15360 	rv = sc->phy.acquire(sc);
15361 	if (rv != 0) {
15362 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15363 		device_xname(sc->sc_dev), __func__));
15364 		return -1;
15365 	}
15366 
15367 	/* Toggle LANPHYPC */
15368 	wm_toggle_lanphypc_pch_lpt(sc);
15369 
15370 	/* Unforce SMBus mode in PHY */
15371 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15372 	if (rv != 0) {
15373 		uint32_t reg2;
15374 
15375 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15376 			__func__);
15377 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15378 		reg2 |= CTRL_EXT_FORCE_SMBUS;
15379 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15380 		delay(50 * 1000);
15381 
15382 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15383 		    &phyreg);
15384 		if (rv != 0)
15385 			goto release;
15386 	}
15387 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15388 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15389 
15390 	/* Unforce SMBus mode in MAC */
15391 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
15392 	reg &= ~CTRL_EXT_FORCE_SMBUS;
15393 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15394 
15395 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15396 	if (rv != 0)
15397 		goto release;
15398 	phyreg |= HV_PM_CTRL_K1_ENA;
15399 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15400 
15401 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15402 		&phyreg);
15403 	if (rv != 0)
15404 		goto release;
15405 	phyreg &= ~(I218_ULP_CONFIG1_IND
15406 	    | I218_ULP_CONFIG1_STICKY_ULP
15407 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
15408 	    | I218_ULP_CONFIG1_WOL_HOST
15409 	    | I218_ULP_CONFIG1_INBAND_EXIT
15410 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15411 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15412 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
15413 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15414 	phyreg |= I218_ULP_CONFIG1_START;
15415 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15416 
15417 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
15418 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
15419 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15420 
15421 release:
15422 	/* Release semaphore */
15423 	sc->phy.release(sc);
15424 	wm_gmii_reset(sc);
15425 	delay(50 * 1000);
15426 
15427 	return rv;
15428 }
15429 
15430 /* WOL in the newer chipset interfaces (pchlan) */
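/*
 * On PCH and later the wake-up logic lives in the PHY, so the MAC's
 * receive address registers, multicast table and relevant RCTL bits are
 * mirrored into the BM_WUC-page PHY registers before WUC/WUFC are armed
 * on both the MAC and PHY sides.
 */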
15431 static int
15432 wm_enable_phy_wakeup(struct wm_softc *sc)
15433 {
15434 	device_t dev = sc->sc_dev;
15435 	uint32_t mreg, moff;
15436 	uint16_t wuce, wuc, wufc, preg;
15437 	int i, rv;
15438 
15439 	KASSERT(sc->sc_type >= WM_T_PCH);
15440 
15441 	/* Copy MAC RARs to PHY RARs */
15442 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
15443 
15444 	/* Activate PHY wakeup */
15445 	rv = sc->phy.acquire(sc);
15446 	if (rv != 0) {
15447 		device_printf(dev, "%s: failed to acquire semaphore\n",
15448 		    __func__);
15449 		return rv;
15450 	}
15451 
15452 	/*
15453 	 * Enable access to PHY wakeup registers.
15454 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15455 	 */
15456 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15457 	if (rv != 0) {
15458 		device_printf(dev,
15459 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
15460 		goto release;
15461 	}
15462 
15463 	/* Copy MAC MTA to PHY MTA */
15464 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15465 		uint16_t lo, hi;
15466 
15467 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15468 		lo = (uint16_t)(mreg & 0xffff);
15469 		hi = (uint16_t)((mreg >> 16) & 0xffff);
15470 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15471 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15472 	}
15473 
15474 	/* Configure PHY Rx Control register */
15475 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15476 	mreg = CSR_READ(sc, WMREG_RCTL);
15477 	if (mreg & RCTL_UPE)
15478 		preg |= BM_RCTL_UPE;
15479 	if (mreg & RCTL_MPE)
15480 		preg |= BM_RCTL_MPE;
15481 	preg &= ~(BM_RCTL_MO_MASK);
15482 	moff = __SHIFTOUT(mreg, RCTL_MO);
15483 	if (moff != 0)
15484 		preg |= moff << BM_RCTL_MO_SHIFT;
15485 	if (mreg & RCTL_BAM)
15486 		preg |= BM_RCTL_BAM;
15487 	if (mreg & RCTL_PMCF)
15488 		preg |= BM_RCTL_PMCF;
15489 	mreg = CSR_READ(sc, WMREG_CTRL);
15490 	if (mreg & CTRL_RFCE)
15491 		preg |= BM_RCTL_RFCE;
15492 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15493 
15494 	wuc = WUC_APME | WUC_PME_EN;
15495 	wufc = WUFC_MAG;
15496 	/* Enable PHY wakeup in MAC register */
15497 	CSR_WRITE(sc, WMREG_WUC,
15498 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15499 	CSR_WRITE(sc, WMREG_WUFC, wufc);
15500 
15501 	/* Configure and enable PHY wakeup in PHY registers */
15502 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15503 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15504 
15505 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15506 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15507 
15508 release:
15509 	sc->phy.release(sc);
15510 
15511 	return rv;
15512 }
15513 
15514 /* Power down workaround on D3 */
15515 static void
15516 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15517 {
15518 	uint32_t reg;
15519 	uint16_t phyreg;
15520 	int i;
15521 
15522 	for (i = 0; i < 2; i++) {
15523 		/* Disable link */
15524 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15525 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15526 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15527 
15528 		/*
15529 		 * Call gig speed drop workaround on Gig disable before
15530 		 * accessing any PHY registers
15531 		 */
15532 		if (sc->sc_type == WM_T_ICH8)
15533 			wm_gig_downshift_workaround_ich8lan(sc);
15534 
15535 		/* Write VR power-down enable */
15536 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15537 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15538 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15539 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15540 
15541 		/* Read it back and test */
15542 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15543 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15544 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15545 			break;
15546 
15547 		/* Issue PHY reset and repeat at most one more time */
15548 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15549 	}
15550 }
15551 
15552 /*
15553  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15554  *  @sc: pointer to the HW structure
15555  *
15556  *  During S0 to Sx transition, it is possible the link remains at gig
15557  *  instead of negotiating to a lower speed.  Before going to Sx, set
15558  *  'Gig Disable' to force link speed negotiation to a lower speed based on
15559  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
15560  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15561  *  needs to be written.
15562  *  Parts that support (and are linked to a partner which support) EEE in
15563  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15564  *  than 10Mbps w/o EEE.
15565  */
15566 static void
15567 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15568 {
15569 	device_t dev = sc->sc_dev;
15570 	struct ethercom *ec = &sc->sc_ethercom;
15571 	uint32_t phy_ctrl;
15572 	int rv;
15573 
15574 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15575 	phy_ctrl |= PHY_CTRL_GBE_DIS;
15576 
15577 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15578 
15579 	if (sc->sc_phytype == WMPHY_I217) {
15580 		uint16_t devid = sc->sc_pcidevid;
15581 
15582 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15583 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
15584 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15585 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15586 		    (sc->sc_type >= WM_T_PCH_SPT))
15587 			CSR_WRITE(sc, WMREG_FEXTNVM6,
15588 			    CSR_READ(sc, WMREG_FEXTNVM6)
15589 			    & ~FEXTNVM6_REQ_PLL_CLK);
15590 
15591 		if (sc->phy.acquire(sc) != 0)
15592 			goto out;
15593 
15594 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15595 			uint16_t eee_advert;
15596 
15597 			rv = wm_read_emi_reg_locked(dev,
15598 			    I217_EEE_ADVERTISEMENT, &eee_advert);
15599 			if (rv)
15600 				goto release;
15601 
15602 			/*
15603 			 * Disable LPLU if both link partners support 100BaseT
15604 			 * EEE and 100Full is advertised on both ends of the
15605 			 * link, and enable Auto Enable LPI since there will
15606 			 * be no driver to enable LPI while in Sx.
15607 			 */
15608 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
15609 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15610 				uint16_t anar, phy_reg;
15611 
15612 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
15613 				    &anar);
15614 				if (anar & ANAR_TX_FD) {
15615 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15616 					    PHY_CTRL_NOND0A_LPLU);
15617 
15618 					/* Set Auto Enable LPI after link up */
15619 					sc->phy.readreg_locked(dev, 2,
15620 					    I217_LPI_GPIO_CTRL, &phy_reg);
15621 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15622 					sc->phy.writereg_locked(dev, 2,
15623 					    I217_LPI_GPIO_CTRL, phy_reg);
15624 				}
15625 			}
15626 		}
15627 
15628 		/*
15629 		 * For i217 Intel Rapid Start Technology support,
15630 		 * when the system is going into Sx and no manageability engine
15631 		 * is present, the driver must configure proxy to reset only on
15632 		 * power good.	LPI (Low Power Idle) state must also reset only
15633 		 * on power good, as well as the MTA (Multicast table array).
15634 		 * The SMBus release must also be disabled on LCD reset.
15635 		 */
15636 
15637 		/*
15638 		 * Enable MTA to reset for Intel Rapid Start Technology
15639 		 * Support
15640 		 */
15641 
15642 release:
15643 		sc->phy.release(sc);
15644 	}
15645 out:
15646 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15647 
15648 	if (sc->sc_type == WM_T_ICH8)
15649 		wm_gig_downshift_workaround_ich8lan(sc);
15650 
15651 	if (sc->sc_type >= WM_T_PCH) {
15652 		wm_oem_bits_config_ich8lan(sc, false);
15653 
15654 		/* Reset PHY to activate OEM bits on 82577/8 */
15655 		if (sc->sc_type == WM_T_PCH)
15656 			wm_reset_phy(sc);
15657 
15658 		if (sc->phy.acquire(sc) != 0)
15659 			return;
15660 		wm_write_smbus_addr(sc);
15661 		sc->phy.release(sc);
15662 	}
15663 }
15664 
15665 /*
15666  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15667  *  @sc: pointer to the HW structure
15668  *
15669  *  During Sx to S0 transitions on non-managed devices or managed devices
15670  *  on which PHY resets are not blocked, if the PHY registers cannot be
15671  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
15672  *  the PHY.
15673  *  On i217, setup Intel Rapid Start Technology.
15674  */
15675 static int
15676 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15677 {
15678 	device_t dev = sc->sc_dev;
15679 	int rv;
15680 
15681 	if (sc->sc_type < WM_T_PCH2)
15682 		return 0;
15683 
15684 	rv = wm_init_phy_workarounds_pchlan(sc);
15685 	if (rv != 0)
15686 		return -1;
15687 
15688 	/* For i217 Intel Rapid Start Technology support when the system
15689 	 * is transitioning from Sx and no manageability engine is present
15690 	 * configure SMBus to restore on reset, disable proxy, and enable
15691 	 * the reset on MTA (Multicast table array).
15692 	 */
15693 	if (sc->sc_phytype == WMPHY_I217) {
15694 		uint16_t phy_reg;
15695 
15696 		if (sc->phy.acquire(sc) != 0)
15697 			return -1;
15698 
15699 		/* Clear Auto Enable LPI after link up */
15700 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15701 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15702 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15703 
15704 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15705 			/* Restore clear on SMB if no manageability engine
15706 			 * is present
15707 			 */
15708 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15709 			    &phy_reg);
15710 			if (rv != 0)
15711 				goto release;
15712 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15713 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15714 
15715 			/* Disable Proxy */
15716 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15717 		}
15718 		/* Enable reset on MTA */
15719 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15720 		if (rv != 0)
15721 			goto release;
15722 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15723 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15724 
15725 release:
15726 		sc->phy.release(sc);
15727 		return rv;
15728 	}
15729 
15730 	return 0;
15731 }
15732 
15733 static void
15734 wm_enable_wakeup(struct wm_softc *sc)
15735 {
15736 	uint32_t reg, pmreg;
15737 	pcireg_t pmode;
15738 	int rv = 0;
15739 
15740 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15741 		device_xname(sc->sc_dev), __func__));
15742 
15743 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15744 	    &pmreg, NULL) == 0)
15745 		return;
15746 
15747 	if ((sc->sc_flags & WM_F_WOL) == 0)
15748 		goto pme;
15749 
15750 	/* Advertise the wakeup capability */
15751 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15752 	    | CTRL_SWDPIN(3));
15753 
15754 	/* Keep the laser running on fiber adapters */
15755 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15756 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15757 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15758 		reg |= CTRL_EXT_SWDPIN(3);
15759 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15760 	}
15761 
15762 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15763 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15764 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15765 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15766 		wm_suspend_workarounds_ich8lan(sc);
15767 
15768 #if 0	/* For the multicast packet */
15769 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15770 	reg |= WUFC_MC;
15771 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15772 #endif
15773 
15774 	if (sc->sc_type >= WM_T_PCH) {
15775 		rv = wm_enable_phy_wakeup(sc);
15776 		if (rv != 0)
15777 			goto pme;
15778 	} else {
15779 		/* Enable wakeup by the MAC */
15780 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15781 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15782 	}
15783 
15784 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15785 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15786 		|| (sc->sc_type == WM_T_PCH2))
15787 	    && (sc->sc_phytype == WMPHY_IGP_3))
15788 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15789 
15790 pme:
15791 	/* Request PME */
15792 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15793 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15794 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15795 		/* For WOL */
15796 		pmode |= PCI_PMCSR_PME_EN;
15797 	} else {
15798 		/* Disable WOL */
15799 		pmode &= ~PCI_PMCSR_PME_EN;
15800 	}
15801 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15802 }
15803 
15804 /* Disable ASPM L0s and/or L1 for workaround */
15805 static void
15806 wm_disable_aspm(struct wm_softc *sc)
15807 {
15808 	pcireg_t reg, mask = 0;
15809 	const char *str = "";
15810 
15811 	/*
15812 	 * Only for PCIe devices which have the PCIe capability in their
15813 	 * PCI config space.
15814 	 */
15815 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15816 		return;
15817 
15818 	switch (sc->sc_type) {
15819 	case WM_T_82571:
15820 	case WM_T_82572:
15821 		/*
15822 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15823 		 * State Power management L1 State (ASPM L1).
15824 		 */
15825 		mask = PCIE_LCSR_ASPM_L1;
15826 		str = "L1 is";
15827 		break;
15828 	case WM_T_82573:
15829 	case WM_T_82574:
15830 	case WM_T_82583:
15831 		/*
15832 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
15833 		 *
15834 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
15835 		 * some chipsets.  The 82574 and 82583 documents say that
15836 		 * disabling L0s on those specific chipsets is sufficient,
15837 		 * but we do what the Intel em driver does.
15838 		 *
15839 		 * References:
15840 		 * Errata 8 of the Specification Update of i82573.
15841 		 * Errata 20 of the Specification Update of i82574.
15842 		 * Errata 9 of the Specification Update of i82583.
15843 		 */
15844 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15845 		str = "L0s and L1 are";
15846 		break;
15847 	default:
15848 		return;
15849 	}
15850 
15851 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15852 	    sc->sc_pcixe_capoff + PCIE_LCSR);
15853 	reg &= ~mask;
15854 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15855 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15856 
15857 	/* Print only in wm_attach() */
15858 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15859 		aprint_verbose_dev(sc->sc_dev,
15860 		    "ASPM %s disabled to workaround the errata.\n", str);
15861 }
15862 
15863 /* LPLU */
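/*
 * LPLU (Low Power Link Up) makes the PHY negotiate the lowest possible
 * speed to save power; here D0 (active state) LPLU is disabled.  Where
 * the knob lives varies by generation: an IGP PHY register, the PHPM or
 * PHY_CTRL MAC registers, or the HV OEM bits, hence the switch below.
 */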
15864 
15865 static void
15866 wm_lplu_d0_disable(struct wm_softc *sc)
15867 {
15868 	struct mii_data *mii = &sc->sc_mii;
15869 	uint32_t reg;
15870 	uint16_t phyval;
15871 
15872 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15873 		device_xname(sc->sc_dev), __func__));
15874 
15875 	if (sc->sc_phytype == WMPHY_IFE)
15876 		return;
15877 
15878 	switch (sc->sc_type) {
15879 	case WM_T_82571:
15880 	case WM_T_82572:
15881 	case WM_T_82573:
15882 	case WM_T_82575:
15883 	case WM_T_82576:
15884 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
15885 		phyval &= ~PMR_D0_LPLU;
15886 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
15887 		break;
15888 	case WM_T_82580:
15889 	case WM_T_I350:
15890 	case WM_T_I210:
15891 	case WM_T_I211:
15892 		reg = CSR_READ(sc, WMREG_PHPM);
15893 		reg &= ~PHPM_D0A_LPLU;
15894 		CSR_WRITE(sc, WMREG_PHPM, reg);
15895 		break;
15896 	case WM_T_82574:
15897 	case WM_T_82583:
15898 	case WM_T_ICH8:
15899 	case WM_T_ICH9:
15900 	case WM_T_ICH10:
15901 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15902 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15903 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15904 		CSR_WRITE_FLUSH(sc);
15905 		break;
15906 	case WM_T_PCH:
15907 	case WM_T_PCH2:
15908 	case WM_T_PCH_LPT:
15909 	case WM_T_PCH_SPT:
15910 	case WM_T_PCH_CNP:
15911 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15912 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15913 		if (wm_phy_resetisblocked(sc) == false)
15914 			phyval |= HV_OEM_BITS_ANEGNOW;
15915 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15916 		break;
15917 	default:
15918 		break;
15919 	}
15920 }
15921 
15922 /* EEE */
15923 
15924 static int
15925 wm_set_eee_i350(struct wm_softc *sc)
15926 {
15927 	struct ethercom *ec = &sc->sc_ethercom;
15928 	uint32_t ipcnfg, eeer;
15929 	uint32_t ipcnfg_mask
15930 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15931 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15932 
15933 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15934 
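	/*
	 * EEE on these MACs is driven by two registers: IPCNFG selects the
	 * speeds allowed to negotiate EEE (1G/100M autoneg, plus 10BASE-Te)
	 * and EEER enables LPI on the Tx/Rx paths and LPI flow control.
	 */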
15935 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15936 	eeer = CSR_READ(sc, WMREG_EEER);
15937 
15938 	/* Enable or disable per user setting */
15939 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15940 		ipcnfg |= ipcnfg_mask;
15941 		eeer |= eeer_mask;
15942 	} else {
15943 		ipcnfg &= ~ipcnfg_mask;
15944 		eeer &= ~eeer_mask;
15945 	}
15946 
15947 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15948 	CSR_WRITE(sc, WMREG_EEER, eeer);
15949 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15950 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15951 
15952 	return 0;
15953 }
15954 
15955 static int
15956 wm_set_eee_pchlan(struct wm_softc *sc)
15957 {
15958 	device_t dev = sc->sc_dev;
15959 	struct ethercom *ec = &sc->sc_ethercom;
15960 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15961 	int rv = 0;
15962 
15963 	switch (sc->sc_phytype) {
15964 	case WMPHY_82579:
15965 		lpa = I82579_EEE_LP_ABILITY;
15966 		pcs_status = I82579_EEE_PCS_STATUS;
15967 		adv_addr = I82579_EEE_ADVERTISEMENT;
15968 		break;
15969 	case WMPHY_I217:
15970 		lpa = I217_EEE_LP_ABILITY;
15971 		pcs_status = I217_EEE_PCS_STATUS;
15972 		adv_addr = I217_EEE_ADVERTISEMENT;
15973 		break;
15974 	default:
15975 		return 0;
15976 	}
15977 
15978 	if (sc->phy.acquire(sc)) {
15979 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
15980 		return 0;
15981 	}
15982 
15983 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15984 	if (rv != 0)
15985 		goto release;
15986 
15987 	/* Clear bits that enable EEE in various speeds */
15988 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15989 
15990 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15991 		/* Save off link partner's EEE ability */
15992 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15993 		if (rv != 0)
15994 			goto release;
15995 
15996 		/* Read EEE advertisement */
15997 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15998 			goto release;
15999 
16000 		/*
16001 		 * Enable EEE only for speeds in which the link partner is
16002 		 * EEE capable and for which we advertise EEE.
16003 		 */
16004 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
16005 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
16006 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
16007 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
16008 			if ((data & ANLPAR_TX_FD) != 0)
16009 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
16010 			else {
16011 				/*
16012 				 * EEE is not supported in 100Half, so ignore
16013 				 * partner's EEE in 100 ability if full-duplex
16014 				 * is not advertised.
16015 				 */
16016 				sc->eee_lp_ability
16017 				    &= ~AN_EEEADVERT_100_TX;
16018 			}
16019 		}
16020 	}
16021 
16022 	if (sc->sc_phytype == WMPHY_82579) {
16023 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
16024 		if (rv != 0)
16025 			goto release;
16026 
16027 		data &= ~I82579_LPI_PLL_SHUT_100;
16028 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
16029 	}
16030 
16031 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
16032 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
16033 		goto release;
16034 
16035 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
16036 release:
16037 	sc->phy.release(sc);
16038 
16039 	return rv;
16040 }
16041 
16042 static int
16043 wm_set_eee(struct wm_softc *sc)
16044 {
16045 	struct ethercom *ec = &sc->sc_ethercom;
16046 
16047 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
16048 		return 0;
16049 
16050 	if (sc->sc_type == WM_T_I354) {
16051 		/* I354 uses an external PHY */
16052 		return 0; /* not yet */
16053 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
16054 		return wm_set_eee_i350(sc);
16055 	else if (sc->sc_type >= WM_T_PCH2)
16056 		return wm_set_eee_pchlan(sc);
16057 
16058 	return 0;
16059 }
16060 
16061 /*
16062  * Workarounds (mainly PHY related).
16063  * Basically, PHY workarounds live in the PHY drivers.
16064  */
16065 
16066 /* Work-around for 82566 Kumeran PCS lock loss */
16067 static int
16068 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
16069 {
16070 	struct mii_data *mii = &sc->sc_mii;
16071 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16072 	int i, reg, rv;
16073 	uint16_t phyreg;
16074 
16075 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16076 		device_xname(sc->sc_dev), __func__));
16077 
16078 	/* If the link is not up, do nothing */
16079 	if ((status & STATUS_LU) == 0)
16080 		return 0;
16081 
16082 	/* Nothing to do if the link speed is other than 1Gbps */
16083 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
16084 		return 0;
16085 
16086 	for (i = 0; i < 10; i++) {
16087 		/* read twice */
16088 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16089 		if (rv != 0)
16090 			return rv;
16091 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
16092 		if (rv != 0)
16093 			return rv;
16094 
16095 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
16096 			goto out;	/* GOOD! */
16097 
16098 		/* Reset the PHY */
16099 		wm_reset_phy(sc);
16100 		delay(5*1000);
16101 	}
16102 
16103 	/* Disable GigE link negotiation */
16104 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
16105 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
16106 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
16107 
16108 	/*
16109 	 * Call gig speed drop workaround on Gig disable before accessing
16110 	 * any PHY registers.
16111 	 */
16112 	wm_gig_downshift_workaround_ich8lan(sc);
16113 
16114 out:
16115 	return 0;
16116 }
16117 
16118 /*
16119  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
16120  *  @sc: pointer to the HW structure
16121  *
16122  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
16123  *  LPLU, Gig disable, MDIC PHY reset):
16124  *    1) Set Kumeran Near-end loopback
16125  *    2) Clear Kumeran Near-end loopback
16126  *  Should only be called for ICH8[m] devices with any 1G Phy.
16127  */
16128 static void
16129 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
16130 {
16131 	uint16_t kmreg;
16132 
16133 	/* Only for igp3 */
16134 	if (sc->sc_phytype == WMPHY_IGP_3) {
16135 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
16136 			return;
16137 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
16138 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
16139 			return;
16140 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
16141 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
16142 	}
16143 }
16144 
16145 /*
16146  * Workaround for the PCH's PHYs
16147  * XXX should be moved to a new PHY driver?
16148  */
16149 static int
16150 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
16151 {
16152 	device_t dev = sc->sc_dev;
16153 	struct mii_data *mii = &sc->sc_mii;
16154 	struct mii_softc *child;
16155 	uint16_t phy_data, phyrev = 0;
16156 	int phytype = sc->sc_phytype;
16157 	int rv;
16158 
16159 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16160 		device_xname(dev), __func__));
16161 	KASSERT(sc->sc_type == WM_T_PCH);
16162 
16163 	/* Set MDIO slow mode before any other MDIO access */
16164 	if (phytype == WMPHY_82577)
16165 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16166 			return rv;
16167 
16168 	child = LIST_FIRST(&mii->mii_phys);
16169 	if (child != NULL)
16170 		phyrev = child->mii_mpd_rev;
16171 
16172 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
16173 	if ((child != NULL) &&
16174 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16175 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
16176 		/* Disable generation of early preamble (0x4431) */
16177 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16178 		    &phy_data);
16179 		if (rv != 0)
16180 			return rv;
16181 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16182 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16183 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16184 		    phy_data);
16185 		if (rv != 0)
16186 			return rv;
16187 
16188 		/* Preamble tuning for SSC */
16189 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16190 		if (rv != 0)
16191 			return rv;
16192 	}
16193 
16194 	/* 82578 */
16195 	if (phytype == WMPHY_82578) {
16196 		/*
16197 		 * Return registers to default by doing a soft reset then
16198 		 * writing 0x3140 to the control register
16199 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16200 		 */
16201 		if ((child != NULL) && (phyrev < 2)) {
16202 			PHY_RESET(child);
16203 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16204 			if (rv != 0)
16205 				return rv;
16206 		}
16207 	}
16208 
16209 	/* Select page 0 */
16210 	if ((rv = sc->phy.acquire(sc)) != 0)
16211 		return rv;
16212 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16213 	sc->phy.release(sc);
16214 	if (rv != 0)
16215 		return rv;
16216 
16217 	/*
16218 	 * Configure the K1 Si workaround during PHY reset, assuming there is
16219 	 * link, so that K1 is disabled if the link is at 1Gbps.
16220 	 */
16221 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16222 		return rv;
16223 
16224 	/* Workaround for link disconnects on a busy hub in half duplex */
16225 	rv = sc->phy.acquire(sc);
16226 	if (rv)
16227 		return rv;
16228 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16229 	if (rv)
16230 		goto release;
16231 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16232 	    phy_data & 0x00ff);
16233 	if (rv)
16234 		goto release;
16235 
16236 	/* Set MSE higher to enable link to stay up when noise is high */
16237 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16238 release:
16239 	sc->phy.release(sc);
16240 
16241 	return rv;
16242 }
16243 
16244 /*
16245  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16246  *  @sc:   pointer to the HW structure
16247  */
16248 static void
16249 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16250 {
16251 
16252 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16253 		device_xname(sc->sc_dev), __func__));
16254 
16255 	if (sc->phy.acquire(sc) != 0)
16256 		return;
16257 
16258 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16259 
16260 	sc->phy.release(sc);
16261 }
16262 
16263 static void
16264 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16265 {
16266 	device_t dev = sc->sc_dev;
16267 	uint32_t mac_reg;
16268 	uint16_t i, wuce;
16269 	int count;
16270 
16271 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16272 		device_xname(dev), __func__));
16273 
16274 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16275 		return;
16276 
16277 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16278 	count = wm_rar_count(sc);
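	/*
	 * The BM wakeup registers are 16 bits wide, so each 32-bit RAL/RAH
	 * value is written as two halves; for RAH only the address-valid
	 * bit is carried over into the control half.
	 */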
16279 	for (i = 0; i < count; i++) {
16280 		uint16_t lo, hi;
16281 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16282 		lo = (uint16_t)(mac_reg & 0xffff);
16283 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16284 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16285 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16286 
16287 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16288 		lo = (uint16_t)(mac_reg & 0xffff);
16289 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16290 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16291 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16292 	}
16293 
16294 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16295 }
16296 
16297 /*
16298  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16299  *  with 82579 PHY
16300  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
16301  */
16302 static int
16303 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16304 {
16305 	device_t dev = sc->sc_dev;
16306 	int rar_count;
16307 	int rv;
16308 	uint32_t mac_reg;
16309 	uint16_t dft_ctrl, data;
16310 	uint16_t i;
16311 
16312 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16313 		device_xname(dev), __func__));
16314 
16315 	if (sc->sc_type < WM_T_PCH2)
16316 		return 0;
16317 
16318 	/* Acquire PHY semaphore */
16319 	rv = sc->phy.acquire(sc);
16320 	if (rv != 0)
16321 		return rv;
16322 
16323 	/* Disable Rx path while enabling/disabling workaround */
16324 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16325 	if (rv != 0)
16326 		goto out;
16327 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16328 	    dft_ctrl | (1 << 14));
16329 	if (rv != 0)
16330 		goto out;
16331 
16332 	if (enable) {
16333 		/* Write Rx addresses (rar_entry_count for RAL/H, and
16334 		 * SHRAL/H) and initial CRC values to the MAC
16335 		 */
16336 		rar_count = wm_rar_count(sc);
16337 		for (i = 0; i < rar_count; i++) {
16338 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16339 			uint32_t addr_high, addr_low;
16340 
16341 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16342 			if (!(addr_high & RAL_AV))
16343 				continue;
16344 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16345 			mac_addr[0] = (addr_low & 0xFF);
16346 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
16347 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
16348 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
16349 			mac_addr[4] = (addr_high & 0xFF);
16350 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
16351 
16352 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16353 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16354 		}
16355 
16356 		/* Write Rx addresses to the PHY */
16357 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16358 	}
16359 
16360 	/*
16361 	 * If enable ==
16362 	 *	true: Enable jumbo frame workaround in the MAC.
16363 	 *	false: Write MAC register values back to h/w defaults.
16364 	 */
16365 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16366 	if (enable) {
16367 		mac_reg &= ~(1 << 14);
16368 		mac_reg |= (7 << 15);
16369 	} else
16370 		mac_reg &= ~(0xf << 14);
16371 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16372 
16373 	mac_reg = CSR_READ(sc, WMREG_RCTL);
16374 	if (enable) {
16375 		mac_reg |= RCTL_SECRC;
16376 		sc->sc_rctl |= RCTL_SECRC;
16377 		sc->sc_flags |= WM_F_CRC_STRIP;
16378 	} else {
16379 		mac_reg &= ~RCTL_SECRC;
16380 		sc->sc_rctl &= ~RCTL_SECRC;
16381 		sc->sc_flags &= ~WM_F_CRC_STRIP;
16382 	}
16383 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16384 
16385 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16386 	if (rv != 0)
16387 		goto out;
16388 	if (enable)
16389 		data |= 1 << 0;
16390 	else
16391 		data &= ~(1 << 0);
16392 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16393 	if (rv != 0)
16394 		goto out;
16395 
16396 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16397 	if (rv != 0)
16398 		goto out;
16399 	/*
16400 	 * XXX FreeBSD and Linux do the same thing: they write the same value
16401 	 * in both the enable and the disable case. Is that correct?
16402 	 */
16403 	data &= ~(0xf << 8);
16404 	data |= (0xb << 8);
16405 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16406 	if (rv != 0)
16407 		goto out;
16408 
16409 	/*
16410 	 * If enable ==
16411 	 *	true: Enable jumbo frame workaround in the PHY.
16412 	 *	false: Write PHY register values back to h/w defaults.
16413 	 */
16414 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16415 	if (rv != 0)
16416 		goto out;
16417 	data &= ~(0x7F << 5);
16418 	if (enable)
16419 		data |= (0x37 << 5);
16420 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16421 	if (rv != 0)
16422 		goto out;
16423 
16424 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16425 	if (rv != 0)
16426 		goto out;
16427 	if (enable)
16428 		data &= ~(1 << 13);
16429 	else
16430 		data |= (1 << 13);
16431 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16432 	if (rv != 0)
16433 		goto out;
16434 
16435 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16436 	if (rv != 0)
16437 		goto out;
16438 	data &= ~(0x3FF << 2);
16439 	if (enable)
16440 		data |= (I82579_TX_PTR_GAP << 2);
16441 	else
16442 		data |= (0x8 << 2);
16443 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16444 	if (rv != 0)
16445 		goto out;
16446 
16447 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16448 	    enable ? 0xf100 : 0x7e00);
16449 	if (rv != 0)
16450 		goto out;
16451 
16452 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16453 	if (rv != 0)
16454 		goto out;
16455 	if (enable)
16456 		data |= 1 << 10;
16457 	else
16458 		data &= ~(1 << 10);
16459 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16460 	if (rv != 0)
16461 		goto out;
16462 
16463 	/* Re-enable Rx path after enabling/disabling workaround */
16464 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16465 	    dft_ctrl & ~(1 << 14));
16466 
16467 out:
16468 	sc->phy.release(sc);
16469 
16470 	return rv;
16471 }
16472 
16473 /*
16474  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16475  *  done after every PHY reset.
16476  */
16477 static int
16478 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16479 {
16480 	device_t dev = sc->sc_dev;
16481 	int rv;
16482 
16483 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16484 		device_xname(dev), __func__));
16485 	KASSERT(sc->sc_type == WM_T_PCH2);
16486 
16487 	/* Set MDIO slow mode before any other MDIO access */
16488 	rv = wm_set_mdio_slow_mode_hv(sc);
16489 	if (rv != 0)
16490 		return rv;
16491 
16492 	rv = sc->phy.acquire(sc);
16493 	if (rv != 0)
16494 		return rv;
16495 	/* Set MSE higher to enable link to stay up when noise is high */
16496 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16497 	if (rv != 0)
16498 		goto release;
16499 	/* Drop link after 5 times MSE threshold was reached */
16500 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16501 release:
16502 	sc->phy.release(sc);
16503 
16504 	return rv;
16505 }
16506 
16507 /**
16508  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16509  *  @link: link up bool flag
16510  *
16511  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
16512  *  preventing further DMA write requests.  Work around the issue by
16513  *  disabling the de-assertion of the clock request when in 1Gbps mode.
16514  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16515  *  speeds in order to avoid Tx hangs.
16516  **/
16517 static int
16518 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16519 {
16520 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16521 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16522 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16523 	uint16_t phyreg;
16524 
16525 	if (link && (speed == STATUS_SPEED_1000)) {
16526 		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
16529 		if (rv != 0)
16530 			goto release;
16531 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16532 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
16533 		if (rv != 0)
16534 			goto release;
16535 		delay(20);
16536 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
16537 
16538 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16539 		    &phyreg);
16540 release:
16541 		sc->phy.release(sc);
16542 		return rv;
16543 	}
16544 
16545 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
16546 
16547 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
16548 	if (((child != NULL) && (child->mii_mpd_rev > 5))
16549 	    || !link
16550 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
16551 		goto update_fextnvm6;
16552 
16553 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
16554 
16555 	/* Clear link status transmit timeout */
16556 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
16557 	if (speed == STATUS_SPEED_100) {
16558 		/* Set inband Tx timeout to 5x10us for 100Half */
16559 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16560 
16561 		/* Do not extend the K1 entry latency for 100Half */
16562 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16563 	} else {
16564 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
16565 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16566 
16567 		/* Extend the K1 entry latency for 10 Mbps */
16568 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16569 	}
16570 
16571 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
16572 
16573 update_fextnvm6:
16574 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
16575 	return 0;
16576 }
16577 
16578 /*
16579  *  wm_k1_gig_workaround_hv - K1 Si workaround
16580  *  @sc:   pointer to the HW structure
16581  *  @link: link up bool flag
16582  *
16583  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
16584  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
16585  *  If link is down, the function will restore the default K1 setting located
16586  *  in the NVM.
16587  */
16588 static int
16589 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
16590 {
16591 	int k1_enable = sc->sc_nvm_k1_enabled;
16592 
16593 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16594 		device_xname(sc->sc_dev), __func__));
16595 
16596 	if (sc->phy.acquire(sc) != 0)
16597 		return -1;
16598 
16599 	if (link) {
16600 		k1_enable = 0;
16601 
16602 		/* Link stall fix for link up */
16603 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16604 		    0x0100);
16605 	} else {
16606 		/* Link stall fix for link down */
16607 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16608 		    0x4100);
16609 	}
16610 
16611 	wm_configure_k1_ich8lan(sc, k1_enable);
16612 	sc->phy.release(sc);
16613 
16614 	return 0;
16615 }
16616 
16617 /*
16618  *  wm_k1_workaround_lv - K1 Si workaround
16619  *  @sc:   pointer to the HW structure
16620  *
16621  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
16622  *  Disable K1 for 1000 and 100 speeds
16623  */
16624 static int
16625 wm_k1_workaround_lv(struct wm_softc *sc)
16626 {
16627 	uint32_t reg;
16628 	uint16_t phyreg;
16629 	int rv;
16630 
16631 	if (sc->sc_type != WM_T_PCH2)
16632 		return 0;
16633 
16634 	/* Set K1 beacon duration based on 10Mbps speed */
16635 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16636 	if (rv != 0)
16637 		return rv;
16638 
16639 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16640 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16641 		if (phyreg &
16642 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
16643 			/* LV 1G/100 packet drop issue workaround */
16644 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16645 			    &phyreg);
16646 			if (rv != 0)
16647 				return rv;
16648 			phyreg &= ~HV_PM_CTRL_K1_ENA;
16649 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16650 			    phyreg);
16651 			if (rv != 0)
16652 				return rv;
16653 		} else {
16654 			/* For 10Mbps */
16655 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
16656 			reg &= ~FEXTNVM4_BEACON_DURATION;
16657 			reg |= FEXTNVM4_BEACON_DURATION_16US;
16658 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16659 		}
16660 	}
16661 
16662 	return 0;
16663 }
16664 
16665 /*
16666  *  wm_link_stall_workaround_hv - Si workaround
16667  *  @sc: pointer to the HW structure
16668  *
16669  *  This function works around a Si bug where the link partner can get
16670  *  a link up indication before the PHY does. If small packets are sent
16671  *  by the link partner they can be placed in the packet buffer without
16672  *  being properly accounted for by the PHY and will stall preventing
16673  *  further packets from being received.  The workaround is to clear the
16674  *  packet buffer after the PHY detects link up.
16675  */
16676 static int
16677 wm_link_stall_workaround_hv(struct wm_softc *sc)
16678 {
16679 	uint16_t phyreg;
16680 
16681 	if (sc->sc_phytype != WMPHY_82578)
16682 		return 0;
16683 
16684 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
16685 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
16686 	if ((phyreg & BMCR_LOOP) != 0)
16687 		return 0;
16688 
16689 	/* Check if link is up and at 1Gbps */
16690 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16691 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16692 	    | BM_CS_STATUS_SPEED_MASK;
16693 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16694 		| BM_CS_STATUS_SPEED_1000))
16695 		return 0;
16696 
16697 	delay(200 * 1000);	/* XXX too big */
16698 
16699 	/* Flush the packets in the fifo buffer */
16700 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16701 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16702 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16703 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
16704 
16705 	return 0;
16706 }
16707 
16708 static int
16709 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16710 {
16711 	int rv;
16712 	uint16_t reg;
16713 
16714 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16715 	if (rv != 0)
16716 		return rv;
16717 
16718 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16719 	    reg | HV_KMRN_MDIO_SLOW);
16720 }
16721 
16722 /*
16723  *  wm_configure_k1_ich8lan - Configure K1 power state
16724  *  @sc: pointer to the HW structure
16725  *  @enable: K1 state to configure
16726  *
16727  *  Configure the K1 power state based on the provided parameter.
16728  *  Assumes semaphore already acquired.
16729  */
16730 static void
16731 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16732 {
16733 	uint32_t ctrl, ctrl_ext, tmp;
16734 	uint16_t kmreg;
16735 	int rv;
16736 
16737 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16738 
16739 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16740 	if (rv != 0)
16741 		return;
16742 
16743 	if (k1_enable)
16744 		kmreg |= KUMCTRLSTA_K1_ENABLE;
16745 	else
16746 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16747 
16748 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16749 	if (rv != 0)
16750 		return;
16751 
16752 	delay(20);
16753 
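	/*
	 * Briefly force the MAC speed with speed-bypass set and then restore
	 * the original CTRL/CTRL_EXT values; this appears to be what makes
	 * the new K1 setting take effect.
	 */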
16754 	ctrl = CSR_READ(sc, WMREG_CTRL);
16755 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16756 
16757 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16758 	tmp |= CTRL_FRCSPD;
16759 
16760 	CSR_WRITE(sc, WMREG_CTRL, tmp);
16761 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16762 	CSR_WRITE_FLUSH(sc);
16763 	delay(20);
16764 
16765 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
16766 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16767 	CSR_WRITE_FLUSH(sc);
16768 	delay(20);
16769 
16770 	return;
16771 }
16772 
16773 /* special case - for 82575 - need to do manual init ... */
16774 static void
16775 wm_reset_init_script_82575(struct wm_softc *sc)
16776 {
16777 	/*
16778 	 * Remark: this is untested code - we have no board without EEPROM;
16779 	 * same setup as mentioned in the FreeBSD driver for the i82575.
16780 	 */
16781 
16782 	/* SerDes configuration via SERDESCTRL */
16783 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16784 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16785 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16786 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16787 
16788 	/* CCM configuration via CCMCTL register */
16789 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16790 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16791 
16792 	/* PCIe lanes configuration */
16793 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16794 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16795 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16796 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16797 
16798 	/* PCIe PLL Configuration */
16799 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16800 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16801 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16802 }
16803 
16804 static void
16805 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16806 {
16807 	uint32_t reg;
16808 	uint16_t nvmword;
16809 	int rv;
16810 
16811 	if (sc->sc_type != WM_T_82580)
16812 		return;
16813 	if ((sc->sc_flags & WM_F_SGMII) == 0)
16814 		return;
16815 
16816 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16817 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16818 	if (rv != 0) {
16819 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16820 		    __func__);
16821 		return;
16822 	}
16823 
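	/*
	 * Reflect the NVM routing bits in MDICNFG; DEST apparently selects
	 * the external MDIO interface and COM_MDIO the shared MDIO pins.
	 */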
16824 	reg = CSR_READ(sc, WMREG_MDICNFG);
16825 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16826 		reg |= MDICNFG_DEST;
16827 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16828 		reg |= MDICNFG_COM_MDIO;
16829 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16830 }
16831 
16832 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
16833 
16834 static bool
16835 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16836 {
16837 	uint32_t reg;
16838 	uint16_t id1, id2;
16839 	int i, rv;
16840 
16841 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16842 		device_xname(sc->sc_dev), __func__));
16843 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16844 
16845 	id1 = id2 = 0xffff;
16846 	for (i = 0; i < 2; i++) {
16847 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16848 		    &id1);
16849 		if ((rv != 0) || MII_INVALIDID(id1))
16850 			continue;
16851 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16852 		    &id2);
16853 		if ((rv != 0) || MII_INVALIDID(id2))
16854 			continue;
16855 		break;
16856 	}
16857 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16858 		goto out;
16859 
16860 	/*
16861 	 * In case the PHY needs to be in MDIO slow mode,
16862 	 * set slow mode and try to get the PHY ID again.
16863 	 */
16864 	rv = 0;
16865 	if (sc->sc_type < WM_T_PCH_LPT) {
16866 		sc->phy.release(sc);
16867 		wm_set_mdio_slow_mode_hv(sc);
16868 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
16869 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
16870 		sc->phy.acquire(sc);
16871 	}
16872 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
16873 		device_printf(sc->sc_dev, "XXX return with false\n");
16874 		return false;
16875 	}
16876 out:
16877 	if (sc->sc_type >= WM_T_PCH_LPT) {
16878 		/* Only unforce SMBus if ME is not active */
16879 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16880 			uint16_t phyreg;
16881 
16882 			/* Unforce SMBus mode in PHY */
16883 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
16884 			    CV_SMB_CTRL, &phyreg);
16885 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16886 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
16887 			    CV_SMB_CTRL, phyreg);
16888 
16889 			/* Unforce SMBus mode in MAC */
16890 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16891 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16892 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16893 		}
16894 	}
16895 	return true;
16896 }
16897 
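/*
 * Toggle the LANPHYPC pin to force a PHY power cycle: the override bit
 * takes control of the pin and clearing the value bit appears to drive it
 * low for the duration of the toggle.
 */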
16898 static void
16899 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16900 {
16901 	uint32_t reg;
16902 	int i;
16903 
16904 	/* Set PHY Config Counter to 50msec */
16905 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
16906 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16907 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16908 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16909 
16910 	/* Toggle LANPHYPC */
16911 	reg = CSR_READ(sc, WMREG_CTRL);
16912 	reg |= CTRL_LANPHYPC_OVERRIDE;
16913 	reg &= ~CTRL_LANPHYPC_VALUE;
16914 	CSR_WRITE(sc, WMREG_CTRL, reg);
16915 	CSR_WRITE_FLUSH(sc);
16916 	delay(1000);
16917 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
16918 	CSR_WRITE(sc, WMREG_CTRL, reg);
16919 	CSR_WRITE_FLUSH(sc);
16920 
16921 	if (sc->sc_type < WM_T_PCH_LPT)
16922 		delay(50 * 1000);
16923 	else {
16924 		i = 20;
16925 
16926 		do {
16927 			delay(5 * 1000);
16928 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16929 		    && i--);
16930 
16931 		delay(30 * 1000);
16932 	}
16933 }
16934 
16935 static int
16936 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16937 {
16938 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16939 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16940 	uint32_t rxa;
16941 	uint16_t scale = 0, lat_enc = 0;
16942 	int32_t obff_hwm = 0;
16943 	int64_t lat_ns, value;
16944 
16945 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16946 		device_xname(sc->sc_dev), __func__));
16947 
16948 	if (link) {
16949 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16950 		uint32_t status;
16951 		uint16_t speed;
16952 		pcireg_t preg;
16953 
16954 		status = CSR_READ(sc, WMREG_STATUS);
16955 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
16956 		case STATUS_SPEED_10:
16957 			speed = 10;
16958 			break;
16959 		case STATUS_SPEED_100:
16960 			speed = 100;
16961 			break;
16962 		case STATUS_SPEED_1000:
16963 			speed = 1000;
16964 			break;
16965 		default:
16966 			device_printf(sc->sc_dev, "Unknown speed "
16967 			    "(status = %08x)\n", status);
16968 			return -1;
16969 		}
16970 
16971 		/* Rx Packet Buffer Allocation size (KB) */
16972 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16973 
16974 		/*
16975 		 * Determine the maximum latency tolerated by the device.
16976 		 *
16977 		 * Per the PCIe spec, the tolerated latencies are encoded as
16978 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16979 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
16980 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
16981 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16982 		 */
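		/*
		 * Worked example (hypothetical numbers): with rxa = 24KB,
		 * a 1500 byte MTU and a 100Mb/s link, lat_ns is
		 * (24 * 1024 - 2 * 1514) * 8 * 1000 / 100 = 1723840 ns.
		 * The loop below then folds that into the value/scale
		 * encoding: 1723840 -> 53870 -> 1684 -> 53, giving scale 3
		 * (2^15 ns units) and value 53.
		 */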
16983 		lat_ns = ((int64_t)rxa * 1024 -
16984 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16985 			+ ETHER_HDR_LEN))) * 8 * 1000;
16986 		if (lat_ns < 0)
16987 			lat_ns = 0;
16988 		else
16989 			lat_ns /= speed;
16990 		value = lat_ns;
16991 
16992 		while (value > LTRV_VALUE) {
16993 			scale++;
16994 			value = howmany(value, __BIT(5));
16995 		}
16996 		if (scale > LTRV_SCALE_MAX) {
16997 			device_printf(sc->sc_dev,
16998 			    "Invalid LTR latency scale %d\n", scale);
16999 			return -1;
17000 		}
17001 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
17002 
17003 		/* Determine the maximum latency tolerated by the platform */
17004 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17005 		    WM_PCI_LTR_CAP_LPT);
17006 		max_snoop = preg & 0xffff;
17007 		max_nosnoop = preg >> 16;
17008 
17009 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
17010 
17011 		if (lat_enc > max_ltr_enc) {
17012 			lat_enc = max_ltr_enc;
17013 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
17014 			    * PCI_LTR_SCALETONS(
17015 				    __SHIFTOUT(lat_enc,
17016 					PCI_LTR_MAXSNOOPLAT_SCALE));
17017 		}
17018 
17019 		if (lat_ns) {
17020 			lat_ns *= speed * 1000;
17021 			lat_ns /= 8;
17022 			lat_ns /= 1000000000;
17023 			obff_hwm = (int32_t)(rxa - lat_ns);
17024 		}
17025 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
17026 			device_printf(sc->sc_dev, "Invalid high water mark %d "
17027 			    "(rxa = %d, lat_ns = %d)\n",
17028 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
17029 			return -1;
17030 		}
17031 	}
17032 	/* Snoop and No-Snoop latencies the same */
17033 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
17034 	CSR_WRITE(sc, WMREG_LTRV, reg);
17035 
17036 	/* Set OBFF high water mark */
17037 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
17038 	reg |= obff_hwm;
17039 	CSR_WRITE(sc, WMREG_SVT, reg);
17040 
17041 	/* Enable OBFF */
17042 	reg = CSR_READ(sc, WMREG_SVCR);
17043 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
17044 	CSR_WRITE(sc, WMREG_SVCR, reg);
17045 
17046 	return 0;
17047 }
17048 
17049 /*
17050  * I210 Errata 25 and I211 Errata 10
17051  * Slow System Clock.
17052  *
17053  * Note that on NetBSD this function is called in both the FLASH and the
 * iNVM case.
17054  */
17055 static int
17056 wm_pll_workaround_i210(struct wm_softc *sc)
17057 {
17058 	uint32_t mdicnfg, wuc;
17059 	uint32_t reg;
17060 	pcireg_t pcireg;
17061 	uint32_t pmreg;
17062 	uint16_t nvmword, tmp_nvmword;
17063 	uint16_t phyval;
17064 	bool wa_done = false;
17065 	int i, rv = 0;
17066 
17067 	/* Get Power Management cap offset */
17068 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
17069 	    &pmreg, NULL) == 0)
17070 		return -1;
17071 
17072 	/* Save WUC and MDICNFG registers */
17073 	wuc = CSR_READ(sc, WMREG_WUC);
17074 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
17075 
17076 	reg = mdicnfg & ~MDICNFG_DEST;
17077 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
17078 
17079 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
17080 		/*
17081 		 * The default value of the Initialization Control Word 1
17082 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
17083 		 */
17084 		nvmword = INVM_DEFAULT_AL;
17085 	}
17086 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
17087 
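	/*
	 * Recovery sketch: while the PHY PLL reports an unconfigured state,
	 * reset the internal PHY, override the autoload word via
	 * EEARBC_I210, bounce the device through D3hot so the word is
	 * reloaded, then restore the original word and check again.
	 */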
17088 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
17089 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
17090 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
17091 
17092 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
17093 			rv = 0;
17094 			break; /* OK */
17095 		} else
17096 			rv = -1;
17097 
17098 		wa_done = true;
17099 		/* Directly reset the internal PHY */
17100 		reg = CSR_READ(sc, WMREG_CTRL);
17101 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
17102 
17103 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
17104 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
17105 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
17106 
17107 		CSR_WRITE(sc, WMREG_WUC, 0);
17108 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
17109 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17110 
17111 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
17112 		    pmreg + PCI_PMCSR);
17113 		pcireg |= PCI_PMCSR_STATE_D3;
17114 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17115 		    pmreg + PCI_PMCSR, pcireg);
17116 		delay(1000);
17117 		pcireg &= ~PCI_PMCSR_STATE_D3;
17118 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
17119 		    pmreg + PCI_PMCSR, pcireg);
17120 
17121 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
17122 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
17123 
17124 		/* Restore WUC register */
17125 		CSR_WRITE(sc, WMREG_WUC, wuc);
17126 	}
17127 
17128 	/* Restore MDICNFG setting */
17129 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
17130 	if (wa_done)
17131 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
17132 	return rv;
17133 }
17134 
17135 static void
17136 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
17137 {
17138 	uint32_t reg;
17139 
17140 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
17141 		device_xname(sc->sc_dev), __func__));
17142 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
17143 	    || (sc->sc_type == WM_T_PCH_CNP));
17144 
17145 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
17146 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
17147 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
17148 
17149 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
17150 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
17151 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17152 }
17153 
17154 /* Sysctl functions */
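/* Sysctl handler: report the queue's current Tx descriptor head (TDH). */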
17155 static int
17156 wm_sysctl_tdh_handler(SYSCTLFN_ARGS)
17157 {
17158 	struct sysctlnode node = *rnode;
17159 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17160 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17161 	struct wm_softc *sc = txq->txq_sc;
17162 	uint32_t reg;
17163 
17164 	reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
17165 	node.sysctl_data = &reg;
17166 	return sysctl_lookup(SYSCTLFN_CALL(&node));
17167 }
17168 
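/* Sysctl handler: report the queue's current Tx descriptor tail (TDT). */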
17169 static int
17170 wm_sysctl_tdt_handler(SYSCTLFN_ARGS)
17171 {
17172 	struct sysctlnode node = *rnode;
17173 	struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
17174 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
17175 	struct wm_softc *sc = txq->txq_sc;
17176 	uint32_t reg;
17177 
17178 	reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));
17179 	node.sysctl_data = &reg;
17180 	return sysctl_lookup(SYSCTLFN_CALL(&node));
17181 }
17182 
17183 #ifdef WM_DEBUG
17184 static int
17185 wm_sysctl_debug(SYSCTLFN_ARGS)
17186 {
17187 	struct sysctlnode node = *rnode;
17188 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17189 	uint32_t dflags;
17190 	int error;
17191 
17192 	dflags = sc->sc_debug;
17193 	node.sysctl_data = &dflags;
17194 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
17195 
17196 	if (error || newp == NULL)
17197 		return error;
17198 
17199 	sc->sc_debug = dflags;
17200 	device_printf(sc->sc_dev, "TARC0: %08x\n", CSR_READ(sc, WMREG_TARC0));
17201 	device_printf(sc->sc_dev, "TDT0: %08x\n", CSR_READ(sc, WMREG_TDT(0)));
17202 
17203 	return 0;
17204 }
17205 #endif
17206