xref: /netbsd-src/sys/dev/pci/if_wm.c (revision 627f7eb200a4419d89b531d55fccd2ee3ffdcde0)
1 /*	$NetBSD: if_wm.c,v 1.702 2021/03/11 01:23:33 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- TX Multi queue improvement (refine queue selection logic)
77  *	- Split header buffer for newer descriptors
78  *	- EEE (Energy Efficiency Ethernet) for I354
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  */
83 
84 #include <sys/cdefs.h>
85 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.702 2021/03/11 01:23:33 msaitoh Exp $");
86 
87 #ifdef _KERNEL_OPT
88 #include "opt_net_mpsafe.h"
89 #include "opt_if_wm.h"
90 #endif
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kmem.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/queue.h>
104 #include <sys/syslog.h>
105 #include <sys/interrupt.h>
106 #include <sys/cpu.h>
107 #include <sys/pcq.h>
108 #include <sys/sysctl.h>
109 #include <sys/workqueue.h>
110 
111 #include <sys/rndsource.h>
112 
113 #include <net/if.h>
114 #include <net/if_dl.h>
115 #include <net/if_media.h>
116 #include <net/if_ether.h>
117 
118 #include <net/bpf.h>
119 
120 #include <net/rss_config.h>
121 
122 #include <netinet/in.h>			/* XXX for struct ip */
123 #include <netinet/in_systm.h>		/* XXX for struct ip */
124 #include <netinet/ip.h>			/* XXX for struct ip */
125 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
126 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
127 
128 #include <sys/bus.h>
129 #include <sys/intr.h>
130 #include <machine/endian.h>
131 
132 #include <dev/mii/mii.h>
133 #include <dev/mii/mdio.h>
134 #include <dev/mii/miivar.h>
135 #include <dev/mii/miidevs.h>
136 #include <dev/mii/mii_bitbang.h>
137 #include <dev/mii/ikphyreg.h>
138 #include <dev/mii/igphyreg.h>
139 #include <dev/mii/igphyvar.h>
140 #include <dev/mii/inbmphyreg.h>
141 #include <dev/mii/ihphyreg.h>
142 #include <dev/mii/makphyreg.h>
143 
144 #include <dev/pci/pcireg.h>
145 #include <dev/pci/pcivar.h>
146 #include <dev/pci/pcidevs.h>
147 
148 #include <dev/pci/if_wmreg.h>
149 #include <dev/pci/if_wmvar.h>
150 
151 #ifdef WM_DEBUG
152 #define	WM_DEBUG_LINK		__BIT(0)
153 #define	WM_DEBUG_TX		__BIT(1)
154 #define	WM_DEBUG_RX		__BIT(2)
155 #define	WM_DEBUG_GMII		__BIT(3)
156 #define	WM_DEBUG_MANAGE		__BIT(4)
157 #define	WM_DEBUG_NVM		__BIT(5)
158 #define	WM_DEBUG_INIT		__BIT(6)
159 #define	WM_DEBUG_LOCK		__BIT(7)
160 
161 #if 0
162 #define WM_DEBUG_DEFAULT	WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | \
163 	WM_DEBUG_GMII | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT |    \
164 	WM_DEBUG_LOCK
165 #endif
166 
167 #define	DPRINTF(sc, x, y)			  \
168 	do {					  \
169 		if ((sc)->sc_debug & (x))	  \
170 			printf y;		  \
171 	} while (0)
172 #else
173 #define	DPRINTF(sc, x, y)	__nothing
174 #endif /* WM_DEBUG */
175 
176 #ifdef NET_MPSAFE
177 #define WM_MPSAFE	1
178 #define WM_CALLOUT_FLAGS	CALLOUT_MPSAFE
179 #define WM_SOFTINT_FLAGS	SOFTINT_MPSAFE
180 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
181 #else
182 #define WM_CALLOUT_FLAGS	0
183 #define WM_SOFTINT_FLAGS	0
184 #define WM_WORKQUEUE_FLAGS	WQ_PERCPU
185 #endif
186 
187 #define WM_WORKQUEUE_PRI PRI_SOFTNET
188 
189 /*
190  * This device driver's max interrupt numbers.
191  */
192 #define WM_MAX_NQUEUEINTR	16
193 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
194 
195 #ifndef WM_DISABLE_MSI
196 #define	WM_DISABLE_MSI 0
197 #endif
198 #ifndef WM_DISABLE_MSIX
199 #define	WM_DISABLE_MSIX 0
200 #endif
201 
202 int wm_disable_msi = WM_DISABLE_MSI;
203 int wm_disable_msix = WM_DISABLE_MSIX;
204 
205 #ifndef WM_WATCHDOG_TIMEOUT
206 #define WM_WATCHDOG_TIMEOUT 5
207 #endif
208 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
209 
210 /*
211  * Transmit descriptor list size.  Due to errata, we can only have
212  * 256 hardware descriptors in the ring on < 82544, but we use 4096
213  * on >= 82544. We tell the upper layers that they can queue a lot
214  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
215  * of them at a time.
216  *
217  * We allow up to 64 DMA segments per packet.  Pathological packet
218  * chains containing many small mbufs have been observed in zero-copy
219  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
220  * m_defrag() is called to reduce it.
221  */
222 #define	WM_NTXSEGS		64
223 #define	WM_IFQUEUELEN		256
224 #define	WM_TXQUEUELEN_MAX	64
225 #define	WM_TXQUEUELEN_MAX_82547	16
226 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
227 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
228 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
229 #define	WM_NTXDESC_82542	256
230 #define	WM_NTXDESC_82544	4096
231 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
232 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
233 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
234 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
235 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
236 
237 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
238 
239 #define	WM_TXINTERQSIZE		256
240 
241 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
242 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
243 #endif
244 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
245 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
246 #endif
247 
248 /*
249  * Receive descriptor list size.  We have one Rx buffer for normal
250  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
251  * packet.  We allocate 256 receive descriptors, each with a 2k
252  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
253  */
254 #define	WM_NRXDESC		256U
255 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
256 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
257 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
258 
259 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
260 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
261 #endif
262 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
263 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
264 #endif
265 
266 typedef union txdescs {
267 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
268 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
269 } txdescs_t;
270 
271 typedef union rxdescs {
272 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
273 	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
274 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
275 } rxdescs_t;
276 
277 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
278 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
279 
280 /*
281  * Software state for transmit jobs.
282  */
283 struct wm_txsoft {
284 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
285 	bus_dmamap_t txs_dmamap;	/* our DMA map */
286 	int txs_firstdesc;		/* first descriptor in packet */
287 	int txs_lastdesc;		/* last descriptor in packet */
288 	int txs_ndesc;			/* # of descriptors used */
289 };
290 
291 /*
292  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
293  * buffer and a DMA map. For packets which fill more than one buffer, we chain
294  * them together.
295  */
296 struct wm_rxsoft {
297 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
298 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
299 };
300 
301 #define WM_LINKUP_TIMEOUT	50
302 
303 static uint16_t swfwphysem[] = {
304 	SWFW_PHY0_SM,
305 	SWFW_PHY1_SM,
306 	SWFW_PHY2_SM,
307 	SWFW_PHY3_SM
308 };
309 
310 static const uint32_t wm_82580_rxpbs_table[] = {
311 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
312 };
313 
314 struct wm_softc;
315 
316 #if defined(_LP64) && !defined(WM_DISABLE_EVENT_COUNTERS)
317 #if !defined(WM_EVENT_COUNTERS)
318 #define WM_EVENT_COUNTERS 1
319 #endif
320 #endif
321 
322 #ifdef WM_EVENT_COUNTERS
323 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
324 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
325 	struct evcnt qname##_ev_##evname;
326 
327 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
328 	do {								\
329 		snprintf((q)->qname##_##evname##_evcnt_name,		\
330 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
331 		    "%s%02d%s", #qname, (qnum), #evname);		\
332 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
333 		    (evtype), NULL, (xname),				\
334 		    (q)->qname##_##evname##_evcnt_name);		\
335 	} while (0)
336 
337 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
338 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
339 
340 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
341 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
342 
343 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
344 	evcnt_detach(&(q)->qname##_ev_##evname);
345 #endif /* WM_EVENT_COUNTERS */
346 
347 struct wm_txqueue {
348 	kmutex_t *txq_lock;		/* lock for tx operations */
349 
350 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
351 
352 	/* Software state for the transmit descriptors. */
353 	int txq_num;			/* must be a power of two */
354 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
355 
356 	/* TX control data structures. */
357 	int txq_ndesc;			/* must be a power of two */
358 	size_t txq_descsize;		/* a tx descriptor size */
359 	txdescs_t *txq_descs_u;
360 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
361 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
362 	int txq_desc_rseg;		/* real number of control segment */
363 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
364 #define	txq_descs	txq_descs_u->sctxu_txdescs
365 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
366 
367 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
368 
369 	int txq_free;			/* number of free Tx descriptors */
370 	int txq_next;			/* next ready Tx descriptor */
371 
372 	int txq_sfree;			/* number of free Tx jobs */
373 	int txq_snext;			/* next free Tx job */
374 	int txq_sdirty;			/* dirty Tx jobs */
375 
376 	/* These 4 variables are used only on the 82547. */
377 	int txq_fifo_size;		/* Tx FIFO size */
378 	int txq_fifo_head;		/* current head of FIFO */
379 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
380 	int txq_fifo_stall;		/* Tx FIFO is stalled */
381 
382 	/*
383 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
384 	 * CPUs. This queue intermediate them without block.
385 	 */
386 	pcq_t *txq_interq;
387 
388 	/*
389 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
390 	 * to manage Tx H/W queue's busy flag.
391 	 */
392 	int txq_flags;			/* flags for H/W queue, see below */
393 #define	WM_TXQ_NO_SPACE		0x1
394 #define	WM_TXQ_LINKDOWN_DISCARD	0x2
395 
396 	bool txq_stopping;
397 
398 	bool txq_sending;
399 	time_t txq_lastsent;
400 
401 	/* Checksum flags used for previous packet */
402 	uint32_t	txq_last_hw_cmd;
403 	uint8_t		txq_last_hw_fields;
404 	uint16_t	txq_last_hw_ipcs;
405 	uint16_t	txq_last_hw_tucs;
406 
407 	uint32_t txq_packets;		/* for AIM */
408 	uint32_t txq_bytes;		/* for AIM */
409 #ifdef WM_EVENT_COUNTERS
410 	/* TX event counters */
411 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
412 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
413 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
414 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
415 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
416 					    /* XXX not used? */
417 
418 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
419 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
420 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
421 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
422 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
423 	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
424 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
425 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
426 					    /* other than toomanyseg */
427 
428 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
429 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
430 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
431 	WM_Q_EVCNT_DEFINE(txq, skipcontext) /* Tx skip wring cksum context */
432 
433 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
434 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
435 #endif /* WM_EVENT_COUNTERS */
436 };
437 
438 struct wm_rxqueue {
439 	kmutex_t *rxq_lock;		/* lock for rx operations */
440 
441 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
442 
443 	/* Software state for the receive descriptors. */
444 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
445 
446 	/* RX control data structures. */
447 	int rxq_ndesc;			/* must be a power of two */
448 	size_t rxq_descsize;		/* a rx descriptor size */
449 	rxdescs_t *rxq_descs_u;
450 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
451 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
452 	int rxq_desc_rseg;		/* real number of control segment */
453 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
454 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
455 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
456 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
457 
458 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
459 
460 	int rxq_ptr;			/* next ready Rx desc/queue ent */
461 	int rxq_discard;
462 	int rxq_len;
463 	struct mbuf *rxq_head;
464 	struct mbuf *rxq_tail;
465 	struct mbuf **rxq_tailp;
466 
467 	bool rxq_stopping;
468 
469 	uint32_t rxq_packets;		/* for AIM */
470 	uint32_t rxq_bytes;		/* for AIM */
471 #ifdef WM_EVENT_COUNTERS
472 	/* RX event counters */
473 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
474 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
475 
476 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
477 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
478 #endif
479 };
480 
481 struct wm_queue {
482 	int wmq_id;			/* index of TX/RX queues */
483 	int wmq_intr_idx;		/* index of MSI-X tables */
484 
485 	uint32_t wmq_itr;		/* interrupt interval per queue. */
486 	bool wmq_set_itr;
487 
488 	struct wm_txqueue wmq_txq;
489 	struct wm_rxqueue wmq_rxq;
490 	char sysctlname[32];		/* Name for sysctl */
491 
492 	bool wmq_txrx_use_workqueue;
493 	struct work wmq_cookie;
494 	void *wmq_si;
495 };
496 
497 struct wm_phyop {
498 	int (*acquire)(struct wm_softc *);
499 	void (*release)(struct wm_softc *);
500 	int (*readreg_locked)(device_t, int, int, uint16_t *);
501 	int (*writereg_locked)(device_t, int, int, uint16_t);
502 	int reset_delay_us;
503 	bool no_errprint;
504 };
505 
506 struct wm_nvmop {
507 	int (*acquire)(struct wm_softc *);
508 	void (*release)(struct wm_softc *);
509 	int (*read)(struct wm_softc *, int, int, uint16_t *);
510 };
511 
512 /*
513  * Software state per device.
514  */
515 struct wm_softc {
516 	device_t sc_dev;		/* generic device information */
517 	bus_space_tag_t sc_st;		/* bus space tag */
518 	bus_space_handle_t sc_sh;	/* bus space handle */
519 	bus_size_t sc_ss;		/* bus space size */
520 	bus_space_tag_t sc_iot;		/* I/O space tag */
521 	bus_space_handle_t sc_ioh;	/* I/O space handle */
522 	bus_size_t sc_ios;		/* I/O space size */
523 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
524 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
525 	bus_size_t sc_flashs;		/* flash registers space size */
526 	off_t sc_flashreg_offset;	/*
527 					 * offset to flash registers from
528 					 * start of BAR
529 					 */
530 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
531 
532 	struct ethercom sc_ethercom;	/* ethernet common data */
533 	struct mii_data sc_mii;		/* MII/media information */
534 
535 	pci_chipset_tag_t sc_pc;
536 	pcitag_t sc_pcitag;
537 	int sc_bus_speed;		/* PCI/PCIX bus speed */
538 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
539 
540 	uint16_t sc_pcidevid;		/* PCI device ID */
541 	wm_chip_type sc_type;		/* MAC type */
542 	int sc_rev;			/* MAC revision */
543 	wm_phy_type sc_phytype;		/* PHY type */
544 	uint8_t sc_sfptype;		/* SFP type */
545 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
546 #define	WM_MEDIATYPE_UNKNOWN		0x00
547 #define	WM_MEDIATYPE_FIBER		0x01
548 #define	WM_MEDIATYPE_COPPER		0x02
549 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
550 	int sc_funcid;			/* unit number of the chip (0 to 3) */
551 	int sc_flags;			/* flags; see below */
552 	u_short sc_if_flags;		/* last if_flags */
553 	int sc_ec_capenable;		/* last ec_capenable */
554 	int sc_flowflags;		/* 802.3x flow control flags */
555 	uint16_t eee_lp_ability;	/* EEE link partner's ability */
556 	int sc_align_tweak;
557 
558 	void *sc_ihs[WM_MAX_NINTR];	/*
559 					 * interrupt cookie.
560 					 * - legacy and msi use sc_ihs[0] only
561 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
562 					 */
563 	pci_intr_handle_t *sc_intrs;	/*
564 					 * legacy and msi use sc_intrs[0] only
565 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
566 					 */
567 	int sc_nintrs;			/* number of interrupts */
568 
569 	int sc_link_intr_idx;		/* index of MSI-X tables */
570 
571 	callout_t sc_tick_ch;		/* tick callout */
572 	bool sc_core_stopping;
573 
574 	int sc_nvm_ver_major;
575 	int sc_nvm_ver_minor;
576 	int sc_nvm_ver_build;
577 	int sc_nvm_addrbits;		/* NVM address bits */
578 	unsigned int sc_nvm_wordsize;	/* NVM word size */
579 	int sc_ich8_flash_base;
580 	int sc_ich8_flash_bank_size;
581 	int sc_nvm_k1_enabled;
582 
583 	int sc_nqueues;
584 	struct wm_queue *sc_queue;
585 	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
586 	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
587 	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
588 	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
589 	struct workqueue *sc_queue_wq;
590 	bool sc_txrx_use_workqueue;
591 
592 	int sc_affinity_offset;
593 
594 #ifdef WM_EVENT_COUNTERS
595 	/* Event counters. */
596 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
597 
598 	/* WM_T_82542_2_1 only */
599 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
600 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
601 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
602 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
603 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
604 #endif /* WM_EVENT_COUNTERS */
605 
606 	struct sysctllog *sc_sysctllog;
607 
608 	/* This variable are used only on the 82547. */
609 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
610 
611 	uint32_t sc_ctrl;		/* prototype CTRL register */
612 #if 0
613 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
614 #endif
615 	uint32_t sc_icr;		/* prototype interrupt bits */
616 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
617 	uint32_t sc_tctl;		/* prototype TCTL register */
618 	uint32_t sc_rctl;		/* prototype RCTL register */
619 	uint32_t sc_txcw;		/* prototype TXCW register */
620 	uint32_t sc_tipg;		/* prototype TIPG register */
621 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
622 	uint32_t sc_pba;		/* prototype PBA register */
623 
624 	int sc_tbi_linkup;		/* TBI link status */
625 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
626 	int sc_tbi_serdes_ticks;	/* tbi ticks */
627 
628 	int sc_mchash_type;		/* multicast filter offset */
629 
630 	krndsource_t rnd_source;	/* random source */
631 
632 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
633 
634 	kmutex_t *sc_core_lock;		/* lock for softc operations */
635 	kmutex_t *sc_ich_phymtx;	/*
636 					 * 82574/82583/ICH/PCH specific PHY
637 					 * mutex. For 82574/82583, the mutex
638 					 * is used for both PHY and NVM.
639 					 */
640 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
641 
642 	struct wm_phyop phy;
643 	struct wm_nvmop nvm;
644 #ifdef WM_DEBUG
645 	uint32_t sc_debug;
646 #endif
647 };
648 
649 #define WM_CORE_LOCK(_sc)						\
650 	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
651 #define WM_CORE_UNLOCK(_sc)						\
652 	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
653 #define WM_CORE_LOCKED(_sc)						\
654 	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
655 
656 #define	WM_RXCHAIN_RESET(rxq)						\
657 do {									\
658 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
659 	*(rxq)->rxq_tailp = NULL;					\
660 	(rxq)->rxq_len = 0;						\
661 } while (/*CONSTCOND*/0)
662 
663 #define	WM_RXCHAIN_LINK(rxq, m)						\
664 do {									\
665 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
666 	(rxq)->rxq_tailp = &(m)->m_next;				\
667 } while (/*CONSTCOND*/0)
668 
669 #ifdef WM_EVENT_COUNTERS
670 #define	WM_EVCNT_INCR(ev)						\
671 	atomic_store_relaxed(&((ev)->ev_count),				\
672 	    atomic_load_relaxed(&(ev)->ev_count) + 1)
673 #define	WM_EVCNT_ADD(ev, val)						\
674 	atomic_store_relaxed(&((ev)->ev_count),				\
675 	    atomic_load_relaxed(&(ev)->ev_count) + (val))
676 
677 #define WM_Q_EVCNT_INCR(qname, evname)			\
678 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
679 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
680 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
681 #else /* !WM_EVENT_COUNTERS */
682 #define	WM_EVCNT_INCR(ev)	/* nothing */
683 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
684 
685 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
686 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
687 #endif /* !WM_EVENT_COUNTERS */
688 
689 #define	CSR_READ(sc, reg)						\
690 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
691 #define	CSR_WRITE(sc, reg, val)						\
692 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
693 #define	CSR_WRITE_FLUSH(sc)						\
694 	(void)CSR_READ((sc), WMREG_STATUS)
695 
696 #define ICH8_FLASH_READ32(sc, reg)					\
697 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
698 	    (reg) + sc->sc_flashreg_offset)
699 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
700 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
701 	    (reg) + sc->sc_flashreg_offset, (data))
702 
703 #define ICH8_FLASH_READ16(sc, reg)					\
704 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
705 	    (reg) + sc->sc_flashreg_offset)
706 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
707 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
708 	    (reg) + sc->sc_flashreg_offset, (data))
709 
710 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
711 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
712 
713 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
714 #define	WM_CDTXADDR_HI(txq, x)						\
715 	(sizeof(bus_addr_t) == 8 ?					\
716 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
717 
718 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
719 #define	WM_CDRXADDR_HI(rxq, x)						\
720 	(sizeof(bus_addr_t) == 8 ?					\
721 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
722 
723 /*
724  * Register read/write functions.
725  * Other than CSR_{READ|WRITE}().
726  */
727 #if 0
728 static inline uint32_t wm_io_read(struct wm_softc *, int);
729 #endif
730 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
731 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
732     uint32_t, uint32_t);
733 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
734 
735 /*
736  * Descriptor sync/init functions.
737  */
738 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
739 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
740 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
741 
742 /*
743  * Device driver interface functions and commonly used functions.
744  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
745  */
746 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
747 static int	wm_match(device_t, cfdata_t, void *);
748 static void	wm_attach(device_t, device_t, void *);
749 static int	wm_detach(device_t, int);
750 static bool	wm_suspend(device_t, const pmf_qual_t *);
751 static bool	wm_resume(device_t, const pmf_qual_t *);
752 static void	wm_watchdog(struct ifnet *);
753 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
754     uint16_t *);
755 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
756     uint16_t *);
757 static void	wm_tick(void *);
758 static int	wm_ifflags_cb(struct ethercom *);
759 static int	wm_ioctl(struct ifnet *, u_long, void *);
760 /* MAC address related */
761 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
762 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
763 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
764 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
765 static int	wm_rar_count(struct wm_softc *);
766 static void	wm_set_filter(struct wm_softc *);
767 /* Reset and init related */
768 static void	wm_set_vlan(struct wm_softc *);
769 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
770 static void	wm_get_auto_rd_done(struct wm_softc *);
771 static void	wm_lan_init_done(struct wm_softc *);
772 static void	wm_get_cfg_done(struct wm_softc *);
773 static int	wm_phy_post_reset(struct wm_softc *);
774 static int	wm_write_smbus_addr(struct wm_softc *);
775 static int	wm_init_lcd_from_nvm(struct wm_softc *);
776 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
777 static void	wm_initialize_hardware_bits(struct wm_softc *);
778 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
779 static int	wm_reset_phy(struct wm_softc *);
780 static void	wm_flush_desc_rings(struct wm_softc *);
781 static void	wm_reset(struct wm_softc *);
782 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
783 static void	wm_rxdrain(struct wm_rxqueue *);
784 static void	wm_init_rss(struct wm_softc *);
785 static void	wm_adjust_qnum(struct wm_softc *, int);
786 static inline bool	wm_is_using_msix(struct wm_softc *);
787 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
788 static int	wm_softint_establish_queue(struct wm_softc *, int, int);
789 static int	wm_setup_legacy(struct wm_softc *);
790 static int	wm_setup_msix(struct wm_softc *);
791 static int	wm_init(struct ifnet *);
792 static int	wm_init_locked(struct ifnet *);
793 static void	wm_init_sysctls(struct wm_softc *);
794 static void	wm_unset_stopping_flags(struct wm_softc *);
795 static void	wm_set_stopping_flags(struct wm_softc *);
796 static void	wm_stop(struct ifnet *, int);
797 static void	wm_stop_locked(struct ifnet *, bool, bool);
798 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
799 static void	wm_82547_txfifo_stall(void *);
800 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
801 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
802 /* DMA related */
803 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
804 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
805 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
806 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
807     struct wm_txqueue *);
808 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
809 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
810 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
811     struct wm_rxqueue *);
812 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
813 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
814 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
815 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
816 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
817 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
818 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
819     struct wm_txqueue *);
820 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
821     struct wm_rxqueue *);
822 static int	wm_alloc_txrx_queues(struct wm_softc *);
823 static void	wm_free_txrx_queues(struct wm_softc *);
824 static int	wm_init_txrx_queues(struct wm_softc *);
825 /* Start */
826 static void	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
827     struct wm_txsoft *, uint32_t *, uint8_t *);
828 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
829 static void	wm_start(struct ifnet *);
830 static void	wm_start_locked(struct ifnet *);
831 static int	wm_transmit(struct ifnet *, struct mbuf *);
832 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
833 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
834 		    bool);
835 static void	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
836     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
837 static void	wm_nq_start(struct ifnet *);
838 static void	wm_nq_start_locked(struct ifnet *);
839 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
840 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
841 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
842 		    bool);
843 static void	wm_deferred_start_locked(struct wm_txqueue *);
844 static void	wm_handle_queue(void *);
845 static void	wm_handle_queue_work(struct work *, void *);
846 /* Interrupt */
847 static bool	wm_txeof(struct wm_txqueue *, u_int);
848 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
849 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
850 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
851 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
852 static void	wm_linkintr(struct wm_softc *, uint32_t);
853 static int	wm_intr_legacy(void *);
854 static inline void	wm_txrxintr_disable(struct wm_queue *);
855 static inline void	wm_txrxintr_enable(struct wm_queue *);
856 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
857 static int	wm_txrxintr_msix(void *);
858 static int	wm_linkintr_msix(void *);
859 
860 /*
861  * Media related.
862  * GMII, SGMII, TBI, SERDES and SFP.
863  */
864 /* Common */
865 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
866 /* GMII related */
867 static void	wm_gmii_reset(struct wm_softc *);
868 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
869 static int	wm_get_phy_id_82575(struct wm_softc *);
870 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
871 static int	wm_gmii_mediachange(struct ifnet *);
872 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
873 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
874 static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
875 static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
876 static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
877 static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
878 static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
879 static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
880 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
881 static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
882 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
883 static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
884 static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
885 static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
886 static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
887 static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
888 static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
889 static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
890 	bool);
891 static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
892 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
893 static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
894 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
895 static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
896 static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
897 static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
898 static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
899 static void	wm_gmii_statchg(struct ifnet *);
900 /*
901  * kumeran related (80003, ICH* and PCH*).
902  * These functions are not for accessing MII registers but for accessing
903  * kumeran specific registers.
904  */
905 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
906 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
907 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
908 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
909 /* EMI register related */
910 static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
911 static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
912 static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
913 /* SGMII */
914 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
915 static void	wm_sgmii_sfp_preconfig(struct wm_softc *);
916 static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
917 static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
918 static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
919 static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
920 /* TBI related */
921 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
922 static void	wm_tbi_mediainit(struct wm_softc *);
923 static int	wm_tbi_mediachange(struct ifnet *);
924 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
925 static int	wm_check_for_link(struct wm_softc *);
926 static void	wm_tbi_tick(struct wm_softc *);
927 /* SERDES related */
928 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
929 static int	wm_serdes_mediachange(struct ifnet *);
930 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
931 static void	wm_serdes_tick(struct wm_softc *);
932 /* SFP related */
933 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
934 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
935 
936 /*
937  * NVM related.
938  * Microwire, SPI (w/wo EERD) and Flash.
939  */
940 /* Misc functions */
941 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
942 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
943 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
944 /* Microwire */
945 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
946 /* SPI */
947 static int	wm_nvm_ready_spi(struct wm_softc *);
948 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
949 /* Using with EERD */
950 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
951 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
952 /* Flash */
953 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
954     unsigned int *);
955 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
956 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
957 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
958     uint32_t *);
959 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
960 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
961 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
962 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
963 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
964 /* iNVM */
965 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
966 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
967 /* Lock, detecting NVM type, validate checksum and read */
968 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
969 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
970 static int	wm_nvm_validate_checksum(struct wm_softc *);
971 static void	wm_nvm_version_invm(struct wm_softc *);
972 static void	wm_nvm_version(struct wm_softc *);
973 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
974 
975 /*
976  * Hardware semaphores.
977  * Very complexed...
978  */
979 static int	wm_get_null(struct wm_softc *);
980 static void	wm_put_null(struct wm_softc *);
981 static int	wm_get_eecd(struct wm_softc *);
982 static void	wm_put_eecd(struct wm_softc *);
983 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
984 static void	wm_put_swsm_semaphore(struct wm_softc *);
985 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
986 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
987 static int	wm_get_nvm_80003(struct wm_softc *);
988 static void	wm_put_nvm_80003(struct wm_softc *);
989 static int	wm_get_nvm_82571(struct wm_softc *);
990 static void	wm_put_nvm_82571(struct wm_softc *);
991 static int	wm_get_phy_82575(struct wm_softc *);
992 static void	wm_put_phy_82575(struct wm_softc *);
993 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
994 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
995 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
996 static void	wm_put_swflag_ich8lan(struct wm_softc *);
997 static int	wm_get_nvm_ich8lan(struct wm_softc *);
998 static void	wm_put_nvm_ich8lan(struct wm_softc *);
999 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
1000 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
1001 
1002 /*
1003  * Management mode and power management related subroutines.
1004  * BMC, AMT, suspend/resume and EEE.
1005  */
1006 #if 0
1007 static int	wm_check_mng_mode(struct wm_softc *);
1008 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
1009 static int	wm_check_mng_mode_82574(struct wm_softc *);
1010 static int	wm_check_mng_mode_generic(struct wm_softc *);
1011 #endif
1012 static int	wm_enable_mng_pass_thru(struct wm_softc *);
1013 static bool	wm_phy_resetisblocked(struct wm_softc *);
1014 static void	wm_get_hw_control(struct wm_softc *);
1015 static void	wm_release_hw_control(struct wm_softc *);
1016 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
1017 static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
1018 static void	wm_init_manageability(struct wm_softc *);
1019 static void	wm_release_manageability(struct wm_softc *);
1020 static void	wm_get_wakeup(struct wm_softc *);
1021 static int	wm_ulp_disable(struct wm_softc *);
1022 static int	wm_enable_phy_wakeup(struct wm_softc *);
1023 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
1024 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
1025 static int	wm_resume_workarounds_pchlan(struct wm_softc *);
1026 static void	wm_enable_wakeup(struct wm_softc *);
1027 static void	wm_disable_aspm(struct wm_softc *);
1028 /* LPLU (Low Power Link Up) */
1029 static void	wm_lplu_d0_disable(struct wm_softc *);
1030 /* EEE */
1031 static int	wm_set_eee_i350(struct wm_softc *);
1032 static int	wm_set_eee_pchlan(struct wm_softc *);
1033 static int	wm_set_eee(struct wm_softc *);
1034 
1035 /*
1036  * Workarounds (mainly PHY related).
1037  * Basically, PHY's workarounds are in the PHY drivers.
1038  */
1039 static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
1040 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
1041 static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
1042 static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
1043 static void	wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *);
1044 static int	wm_lv_jumbo_workaround_ich8lan(struct wm_softc *, bool);
1045 static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
1046 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
1047 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
1048 static int	wm_k1_workaround_lv(struct wm_softc *);
1049 static int	wm_link_stall_workaround_hv(struct wm_softc *);
1050 static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
1051 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
1052 static void	wm_reset_init_script_82575(struct wm_softc *);
1053 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
1054 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
1055 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
1056 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
1057 static int	wm_pll_workaround_i210(struct wm_softc *);
1058 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
1059 static bool	wm_phy_need_linkdown_discard(struct wm_softc *);
1060 static void	wm_set_linkdown_discard(struct wm_softc *);
1061 static void	wm_clear_linkdown_discard(struct wm_softc *);
1062 
1063 #ifdef WM_DEBUG
1064 static int	wm_sysctl_debug(SYSCTLFN_PROTO);
1065 #endif
1066 
1067 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
1068     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
1069 
1070 /*
1071  * Devices supported by this driver.
1072  */
1073 static const struct wm_product {
1074 	pci_vendor_id_t		wmp_vendor;
1075 	pci_product_id_t	wmp_product;
1076 	const char		*wmp_name;
1077 	wm_chip_type		wmp_type;
1078 	uint32_t		wmp_flags;
1079 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
1080 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
1081 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
1082 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
1083 #define WMP_MEDIATYPE(x)	((x) & 0x03)
1084 } wm_products[] = {
1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
1086 	  "Intel i82542 1000BASE-X Ethernet",
1087 	  WM_T_82542_2_1,	WMP_F_FIBER },
1088 
1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
1090 	  "Intel i82543GC 1000BASE-X Ethernet",
1091 	  WM_T_82543,		WMP_F_FIBER },
1092 
1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
1094 	  "Intel i82543GC 1000BASE-T Ethernet",
1095 	  WM_T_82543,		WMP_F_COPPER },
1096 
1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
1098 	  "Intel i82544EI 1000BASE-T Ethernet",
1099 	  WM_T_82544,		WMP_F_COPPER },
1100 
1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
1102 	  "Intel i82544EI 1000BASE-X Ethernet",
1103 	  WM_T_82544,		WMP_F_FIBER },
1104 
1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
1106 	  "Intel i82544GC 1000BASE-T Ethernet",
1107 	  WM_T_82544,		WMP_F_COPPER },
1108 
1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
1110 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
1111 	  WM_T_82544,		WMP_F_COPPER },
1112 
1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
1114 	  "Intel i82540EM 1000BASE-T Ethernet",
1115 	  WM_T_82540,		WMP_F_COPPER },
1116 
1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
1118 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
1119 	  WM_T_82540,		WMP_F_COPPER },
1120 
1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
1122 	  "Intel i82540EP 1000BASE-T Ethernet",
1123 	  WM_T_82540,		WMP_F_COPPER },
1124 
1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
1126 	  "Intel i82540EP 1000BASE-T Ethernet",
1127 	  WM_T_82540,		WMP_F_COPPER },
1128 
1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
1130 	  "Intel i82540EP 1000BASE-T Ethernet",
1131 	  WM_T_82540,		WMP_F_COPPER },
1132 
1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
1134 	  "Intel i82545EM 1000BASE-T Ethernet",
1135 	  WM_T_82545,		WMP_F_COPPER },
1136 
1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
1138 	  "Intel i82545GM 1000BASE-T Ethernet",
1139 	  WM_T_82545_3,		WMP_F_COPPER },
1140 
1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
1142 	  "Intel i82545GM 1000BASE-X Ethernet",
1143 	  WM_T_82545_3,		WMP_F_FIBER },
1144 
1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
1146 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
1147 	  WM_T_82545_3,		WMP_F_SERDES },
1148 
1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
1150 	  "Intel i82546EB 1000BASE-T Ethernet",
1151 	  WM_T_82546,		WMP_F_COPPER },
1152 
1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
1154 	  "Intel i82546EB 1000BASE-T Ethernet",
1155 	  WM_T_82546,		WMP_F_COPPER },
1156 
1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
1158 	  "Intel i82545EM 1000BASE-X Ethernet",
1159 	  WM_T_82545,		WMP_F_FIBER },
1160 
1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
1162 	  "Intel i82546EB 1000BASE-X Ethernet",
1163 	  WM_T_82546,		WMP_F_FIBER },
1164 
1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
1166 	  "Intel i82546GB 1000BASE-T Ethernet",
1167 	  WM_T_82546_3,		WMP_F_COPPER },
1168 
1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
1170 	  "Intel i82546GB 1000BASE-X Ethernet",
1171 	  WM_T_82546_3,		WMP_F_FIBER },
1172 
1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
1174 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
1175 	  WM_T_82546_3,		WMP_F_SERDES },
1176 
1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
1178 	  "i82546GB quad-port Gigabit Ethernet",
1179 	  WM_T_82546_3,		WMP_F_COPPER },
1180 
1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
1182 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
1183 	  WM_T_82546_3,		WMP_F_COPPER },
1184 
1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1186 	  "Intel PRO/1000MT (82546GB)",
1187 	  WM_T_82546_3,		WMP_F_COPPER },
1188 
1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1190 	  "Intel i82541EI 1000BASE-T Ethernet",
1191 	  WM_T_82541,		WMP_F_COPPER },
1192 
1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1194 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1195 	  WM_T_82541,		WMP_F_COPPER },
1196 
1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1198 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1199 	  WM_T_82541,		WMP_F_COPPER },
1200 
1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1202 	  "Intel i82541ER 1000BASE-T Ethernet",
1203 	  WM_T_82541_2,		WMP_F_COPPER },
1204 
1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1206 	  "Intel i82541GI 1000BASE-T Ethernet",
1207 	  WM_T_82541_2,		WMP_F_COPPER },
1208 
1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1210 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1211 	  WM_T_82541_2,		WMP_F_COPPER },
1212 
1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1214 	  "Intel i82541PI 1000BASE-T Ethernet",
1215 	  WM_T_82541_2,		WMP_F_COPPER },
1216 
1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1218 	  "Intel i82547EI 1000BASE-T Ethernet",
1219 	  WM_T_82547,		WMP_F_COPPER },
1220 
1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1222 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1223 	  WM_T_82547,		WMP_F_COPPER },
1224 
1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1226 	  "Intel i82547GI 1000BASE-T Ethernet",
1227 	  WM_T_82547_2,		WMP_F_COPPER },
1228 
1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1230 	  "Intel PRO/1000 PT (82571EB)",
1231 	  WM_T_82571,		WMP_F_COPPER },
1232 
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1234 	  "Intel PRO/1000 PF (82571EB)",
1235 	  WM_T_82571,		WMP_F_FIBER },
1236 
1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1238 	  "Intel PRO/1000 PB (82571EB)",
1239 	  WM_T_82571,		WMP_F_SERDES },
1240 
1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1242 	  "Intel PRO/1000 QT (82571EB)",
1243 	  WM_T_82571,		WMP_F_COPPER },
1244 
1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1246 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1247 	  WM_T_82571,		WMP_F_COPPER },
1248 
1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1250 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1251 	  WM_T_82571,		WMP_F_COPPER },
1252 
1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1254 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1255 	  WM_T_82571,		WMP_F_SERDES },
1256 
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1258 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1259 	  WM_T_82571,		WMP_F_SERDES },
1260 
1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1262 	  "Intel 82571EB Quad 1000baseX Ethernet",
1263 	  WM_T_82571,		WMP_F_FIBER },
1264 
1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1266 	  "Intel i82572EI 1000baseT Ethernet",
1267 	  WM_T_82572,		WMP_F_COPPER },
1268 
1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1270 	  "Intel i82572EI 1000baseX Ethernet",
1271 	  WM_T_82572,		WMP_F_FIBER },
1272 
1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1274 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1275 	  WM_T_82572,		WMP_F_SERDES },
1276 
1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1278 	  "Intel i82572EI 1000baseT Ethernet",
1279 	  WM_T_82572,		WMP_F_COPPER },
1280 
1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1282 	  "Intel i82573E",
1283 	  WM_T_82573,		WMP_F_COPPER },
1284 
1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1286 	  "Intel i82573E IAMT",
1287 	  WM_T_82573,		WMP_F_COPPER },
1288 
1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1290 	  "Intel i82573L Gigabit Ethernet",
1291 	  WM_T_82573,		WMP_F_COPPER },
1292 
1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1294 	  "Intel i82574L",
1295 	  WM_T_82574,		WMP_F_COPPER },
1296 
1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1298 	  "Intel i82574L",
1299 	  WM_T_82574,		WMP_F_COPPER },
1300 
1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1302 	  "Intel i82583V",
1303 	  WM_T_82583,		WMP_F_COPPER },
1304 
1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1306 	  "i80003 dual 1000baseT Ethernet",
1307 	  WM_T_80003,		WMP_F_COPPER },
1308 
1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1310 	  "i80003 dual 1000baseX Ethernet",
1311 	  WM_T_80003,		WMP_F_COPPER },
1312 
1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1314 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1315 	  WM_T_80003,		WMP_F_SERDES },
1316 
1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1318 	  "Intel i80003 1000baseT Ethernet",
1319 	  WM_T_80003,		WMP_F_COPPER },
1320 
1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1322 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1323 	  WM_T_80003,		WMP_F_SERDES },
1324 
1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1326 	  "Intel i82801H (M_AMT) LAN Controller",
1327 	  WM_T_ICH8,		WMP_F_COPPER },
1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1329 	  "Intel i82801H (AMT) LAN Controller",
1330 	  WM_T_ICH8,		WMP_F_COPPER },
1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1332 	  "Intel i82801H LAN Controller",
1333 	  WM_T_ICH8,		WMP_F_COPPER },
1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1335 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1336 	  WM_T_ICH8,		WMP_F_COPPER },
1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1338 	  "Intel i82801H (M) LAN Controller",
1339 	  WM_T_ICH8,		WMP_F_COPPER },
1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1341 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1342 	  WM_T_ICH8,		WMP_F_COPPER },
1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1344 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1345 	  WM_T_ICH8,		WMP_F_COPPER },
1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1347 	  "82567V-3 LAN Controller",
1348 	  WM_T_ICH8,		WMP_F_COPPER },
1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1350 	  "82801I (AMT) LAN Controller",
1351 	  WM_T_ICH9,		WMP_F_COPPER },
1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1353 	  "82801I 10/100 LAN Controller",
1354 	  WM_T_ICH9,		WMP_F_COPPER },
1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1356 	  "82801I (G) 10/100 LAN Controller",
1357 	  WM_T_ICH9,		WMP_F_COPPER },
1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1359 	  "82801I (GT) 10/100 LAN Controller",
1360 	  WM_T_ICH9,		WMP_F_COPPER },
1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1362 	  "82801I (C) LAN Controller",
1363 	  WM_T_ICH9,		WMP_F_COPPER },
1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1365 	  "82801I mobile LAN Controller",
1366 	  WM_T_ICH9,		WMP_F_COPPER },
1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1368 	  "82801I mobile (V) LAN Controller",
1369 	  WM_T_ICH9,		WMP_F_COPPER },
1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1371 	  "82801I mobile (AMT) LAN Controller",
1372 	  WM_T_ICH9,		WMP_F_COPPER },
1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1374 	  "82567LM-4 LAN Controller",
1375 	  WM_T_ICH9,		WMP_F_COPPER },
1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1377 	  "82567LM-2 LAN Controller",
1378 	  WM_T_ICH10,		WMP_F_COPPER },
1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1380 	  "82567LF-2 LAN Controller",
1381 	  WM_T_ICH10,		WMP_F_COPPER },
1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1383 	  "82567LM-3 LAN Controller",
1384 	  WM_T_ICH10,		WMP_F_COPPER },
1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1386 	  "82567LF-3 LAN Controller",
1387 	  WM_T_ICH10,		WMP_F_COPPER },
1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1389 	  "82567V-2 LAN Controller",
1390 	  WM_T_ICH10,		WMP_F_COPPER },
1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1392 	  "82567V-3? LAN Controller",
1393 	  WM_T_ICH10,		WMP_F_COPPER },
1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1395 	  "HANKSVILLE LAN Controller",
1396 	  WM_T_ICH10,		WMP_F_COPPER },
1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1398 	  "PCH LAN (82577LM) Controller",
1399 	  WM_T_PCH,		WMP_F_COPPER },
1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1401 	  "PCH LAN (82577LC) Controller",
1402 	  WM_T_PCH,		WMP_F_COPPER },
1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1404 	  "PCH LAN (82578DM) Controller",
1405 	  WM_T_PCH,		WMP_F_COPPER },
1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1407 	  "PCH LAN (82578DC) Controller",
1408 	  WM_T_PCH,		WMP_F_COPPER },
1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1410 	  "PCH2 LAN (82579LM) Controller",
1411 	  WM_T_PCH2,		WMP_F_COPPER },
1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1413 	  "PCH2 LAN (82579V) Controller",
1414 	  WM_T_PCH2,		WMP_F_COPPER },
1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1416 	  "82575EB dual-1000baseT Ethernet",
1417 	  WM_T_82575,		WMP_F_COPPER },
1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1419 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1420 	  WM_T_82575,		WMP_F_SERDES },
1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1422 	  "82575GB quad-1000baseT Ethernet",
1423 	  WM_T_82575,		WMP_F_COPPER },
1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1425 	  "82575GB quad-1000baseT Ethernet (PM)",
1426 	  WM_T_82575,		WMP_F_COPPER },
1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1428 	  "82576 1000BaseT Ethernet",
1429 	  WM_T_82576,		WMP_F_COPPER },
1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1431 	  "82576 1000BaseX Ethernet",
1432 	  WM_T_82576,		WMP_F_FIBER },
1433 
1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1435 	  "82576 gigabit Ethernet (SERDES)",
1436 	  WM_T_82576,		WMP_F_SERDES },
1437 
1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1439 	  "82576 quad-1000BaseT Ethernet",
1440 	  WM_T_82576,		WMP_F_COPPER },
1441 
1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1443 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1444 	  WM_T_82576,		WMP_F_COPPER },
1445 
1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1447 	  "82576 gigabit Ethernet",
1448 	  WM_T_82576,		WMP_F_COPPER },
1449 
1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1451 	  "82576 gigabit Ethernet (SERDES)",
1452 	  WM_T_82576,		WMP_F_SERDES },
1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1454 	  "82576 quad-gigabit Ethernet (SERDES)",
1455 	  WM_T_82576,		WMP_F_SERDES },
1456 
1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1458 	  "82580 1000BaseT Ethernet",
1459 	  WM_T_82580,		WMP_F_COPPER },
1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1461 	  "82580 1000BaseX Ethernet",
1462 	  WM_T_82580,		WMP_F_FIBER },
1463 
1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1465 	  "82580 1000BaseT Ethernet (SERDES)",
1466 	  WM_T_82580,		WMP_F_SERDES },
1467 
1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1469 	  "82580 gigabit Ethernet (SGMII)",
1470 	  WM_T_82580,		WMP_F_COPPER },
1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1472 	  "82580 dual-1000BaseT Ethernet",
1473 	  WM_T_82580,		WMP_F_COPPER },
1474 
1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1476 	  "82580 quad-1000BaseX Ethernet",
1477 	  WM_T_82580,		WMP_F_FIBER },
1478 
1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1480 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1481 	  WM_T_82580,		WMP_F_COPPER },
1482 
1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1484 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1485 	  WM_T_82580,		WMP_F_SERDES },
1486 
1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1488 	  "DH89XXCC 1000BASE-KX Ethernet",
1489 	  WM_T_82580,		WMP_F_SERDES },
1490 
1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1492 	  "DH89XXCC Gigabit Ethernet (SFP)",
1493 	  WM_T_82580,		WMP_F_SERDES },
1494 
1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1496 	  "I350 Gigabit Network Connection",
1497 	  WM_T_I350,		WMP_F_COPPER },
1498 
1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1500 	  "I350 Gigabit Fiber Network Connection",
1501 	  WM_T_I350,		WMP_F_FIBER },
1502 
1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1504 	  "I350 Gigabit Backplane Connection",
1505 	  WM_T_I350,		WMP_F_SERDES },
1506 
1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1508 	  "I350 Quad Port Gigabit Ethernet",
1509 	  WM_T_I350,		WMP_F_SERDES },
1510 
1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1512 	  "I350 Gigabit Connection",
1513 	  WM_T_I350,		WMP_F_COPPER },
1514 
1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1516 	  "I354 Gigabit Ethernet (KX)",
1517 	  WM_T_I354,		WMP_F_SERDES },
1518 
1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1520 	  "I354 Gigabit Ethernet (SGMII)",
1521 	  WM_T_I354,		WMP_F_COPPER },
1522 
1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1524 	  "I354 Gigabit Ethernet (2.5G)",
1525 	  WM_T_I354,		WMP_F_COPPER },
1526 
1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1528 	  "I210-T1 Ethernet Server Adapter",
1529 	  WM_T_I210,		WMP_F_COPPER },
1530 
1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1532 	  "I210 Ethernet (Copper OEM)",
1533 	  WM_T_I210,		WMP_F_COPPER },
1534 
1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1536 	  "I210 Ethernet (Copper IT)",
1537 	  WM_T_I210,		WMP_F_COPPER },
1538 
1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1540 	  "I210 Ethernet (Copper, FLASH less)",
1541 	  WM_T_I210,		WMP_F_COPPER },
1542 
1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1544 	  "I210 Gigabit Ethernet (Fiber)",
1545 	  WM_T_I210,		WMP_F_FIBER },
1546 
1547 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1548 	  "I210 Gigabit Ethernet (SERDES)",
1549 	  WM_T_I210,		WMP_F_SERDES },
1550 
1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1552 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
1553 	  WM_T_I210,		WMP_F_SERDES },
1554 
1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1556 	  "I210 Gigabit Ethernet (SGMII)",
1557 	  WM_T_I210,		WMP_F_COPPER },
1558 
1559 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
1560 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
1561 	  WM_T_I210,		WMP_F_COPPER },
1562 
1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1564 	  "I211 Ethernet (COPPER)",
1565 	  WM_T_I211,		WMP_F_COPPER },
1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1567 	  "I217 V Ethernet Connection",
1568 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1570 	  "I217 LM Ethernet Connection",
1571 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1573 	  "I218 V Ethernet Connection",
1574 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1576 	  "I218 V Ethernet Connection",
1577 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1579 	  "I218 V Ethernet Connection",
1580 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1582 	  "I218 LM Ethernet Connection",
1583 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1585 	  "I218 LM Ethernet Connection",
1586 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1588 	  "I218 LM Ethernet Connection",
1589 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1591 	  "I219 LM Ethernet Connection",
1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1594 	  "I219 LM Ethernet Connection",
1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1597 	  "I219 LM Ethernet Connection",
1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1600 	  "I219 LM Ethernet Connection",
1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1603 	  "I219 LM Ethernet Connection",
1604 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1606 	  "I219 LM Ethernet Connection",
1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1609 	  "I219 LM Ethernet Connection",
1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
1612 	  "I219 LM Ethernet Connection",
1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
1615 	  "I219 LM Ethernet Connection",
1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
1618 	  "I219 LM Ethernet Connection",
1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
1621 	  "I219 LM Ethernet Connection",
1622 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
1624 	  "I219 LM Ethernet Connection",
1625 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
1627 	  "I219 LM Ethernet Connection",
1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1629 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
1630 	  "I219 LM Ethernet Connection",
1631 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1632 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
1633 	  "I219 LM Ethernet Connection",
1634 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1635 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1636 	  "I219 V Ethernet Connection",
1637 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1639 	  "I219 V Ethernet Connection",
1640 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1641 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1642 	  "I219 V Ethernet Connection",
1643 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1644 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1645 	  "I219 V Ethernet Connection",
1646 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1647 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1648 	  "I219 V Ethernet Connection",
1649 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1651 	  "I219 V Ethernet Connection",
1652 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1653 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
1654 	  "I219 V Ethernet Connection",
1655 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1656 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
1657 	  "I219 V Ethernet Connection",
1658 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1659 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
1660 	  "I219 V Ethernet Connection",
1661 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
1663 	  "I219 V Ethernet Connection",
1664 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1665 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
1666 	  "I219 V Ethernet Connection",
1667 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1668 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
1669 	  "I219 V Ethernet Connection",
1670 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1671 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
1672 	  "I219 V Ethernet Connection",
1673 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1674 	{ 0,			0,
1675 	  NULL,
1676 	  0,			0 },
1677 };
1678 
1679 /*
1680  * Register read/write functions.
1681  * Other than CSR_{READ|WRITE}().
1682  */
1683 
1684 #if 0 /* Not currently used */
1685 static inline uint32_t
1686 wm_io_read(struct wm_softc *sc, int reg)
1687 {
1688 
1689 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1690 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1691 }
1692 #endif
1693 
1694 static inline void
1695 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1696 {
1697 
1698 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1699 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1700 }
1701 
1702 static inline void
1703 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1704     uint32_t data)
1705 {
1706 	uint32_t regval;
1707 	int i;
1708 
1709 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1710 
1711 	CSR_WRITE(sc, reg, regval);
1712 
1713 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1714 		delay(5);
1715 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1716 			break;
1717 	}
1718 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1719 		aprint_error("%s: WARNING:"
1720 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1721 		    device_xname(sc->sc_dev), reg);
1722 	}
1723 }
1724 
1725 static inline void
1726 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1727 {
1728 	wa->wa_low = htole32(v & 0xffffffffU);
1729 	if (sizeof(bus_addr_t) == 8)
1730 		wa->wa_high = htole32((uint64_t) v >> 32);
1731 	else
1732 		wa->wa_high = 0;
1733 }
1734 
1735 /*
1736  * Descriptor sync/init functions.
1737  */
1738 static inline void
1739 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1740 {
1741 	struct wm_softc *sc = txq->txq_sc;
1742 
1743 	/* If it will wrap around, sync to the end of the ring. */
1744 	if ((start + num) > WM_NTXDESC(txq)) {
1745 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1746 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1747 		    (WM_NTXDESC(txq) - start), ops);
1748 		num -= (WM_NTXDESC(txq) - start);
1749 		start = 0;
1750 	}
1751 
1752 	/* Now sync whatever is left. */
1753 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1754 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1755 }
1756 
1757 static inline void
1758 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1759 {
1760 	struct wm_softc *sc = rxq->rxq_sc;
1761 
1762 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1763 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1764 }
1765 
1766 static inline void
1767 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1768 {
1769 	struct wm_softc *sc = rxq->rxq_sc;
1770 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1771 	struct mbuf *m = rxs->rxs_mbuf;
1772 
1773 	/*
1774 	 * Note: We scoot the packet forward 2 bytes in the buffer
1775 	 * so that the payload after the Ethernet header is aligned
1776 	 * to a 4-byte boundary.
1777 
1778 	 * XXX BRAINDAMAGE ALERT!
1779 	 * The stupid chip uses the same size for every buffer, which
1780 	 * is set in the Receive Control register.  We are using the 2K
1781 	 * size option, but what we REALLY want is (2K - 2)!  For this
1782 	 * reason, we can't "scoot" packets longer than the standard
1783 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1784 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1785 	 * the upper layer copy the headers.
1786 	 */
1787 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1788 
1789 	if (sc->sc_type == WM_T_82574) {
1790 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1791 		rxd->erx_data.erxd_addr =
1792 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1793 		rxd->erx_data.erxd_dd = 0;
1794 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1795 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1796 
1797 		rxd->nqrx_data.nrxd_paddr =
1798 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1799 		/* Currently, split header is not supported. */
1800 		rxd->nqrx_data.nrxd_haddr = 0;
1801 	} else {
1802 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1803 
1804 		wm_set_dma_addr(&rxd->wrx_addr,
1805 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1806 		rxd->wrx_len = 0;
1807 		rxd->wrx_cksum = 0;
1808 		rxd->wrx_status = 0;
1809 		rxd->wrx_errors = 0;
1810 		rxd->wrx_special = 0;
1811 	}
1812 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1813 
1814 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1815 }
1816 
1817 /*
1818  * Device driver interface functions and commonly used functions.
1819  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1820  */
1821 
1822 /* Lookup supported device table */
1823 static const struct wm_product *
1824 wm_lookup(const struct pci_attach_args *pa)
1825 {
1826 	const struct wm_product *wmp;
1827 
1828 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1829 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1830 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1831 			return wmp;
1832 	}
1833 	return NULL;
1834 }
1835 
1836 /* The match function (ca_match) */
1837 static int
1838 wm_match(device_t parent, cfdata_t cf, void *aux)
1839 {
1840 	struct pci_attach_args *pa = aux;
1841 
1842 	if (wm_lookup(pa) != NULL)
1843 		return 1;
1844 
1845 	return 0;
1846 }
1847 
1848 /* The attach function (ca_attach) */
1849 static void
1850 wm_attach(device_t parent, device_t self, void *aux)
1851 {
1852 	struct wm_softc *sc = device_private(self);
1853 	struct pci_attach_args *pa = aux;
1854 	prop_dictionary_t dict;
1855 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1856 	pci_chipset_tag_t pc = pa->pa_pc;
1857 	int counts[PCI_INTR_TYPE_SIZE];
1858 	pci_intr_type_t max_type;
1859 	const char *eetype, *xname;
1860 	bus_space_tag_t memt;
1861 	bus_space_handle_t memh;
1862 	bus_size_t memsize;
1863 	int memh_valid;
1864 	int i, error;
1865 	const struct wm_product *wmp;
1866 	prop_data_t ea;
1867 	prop_number_t pn;
1868 	uint8_t enaddr[ETHER_ADDR_LEN];
1869 	char buf[256];
1870 	char wqname[MAXCOMLEN];
1871 	uint16_t cfg1, cfg2, swdpin, nvmword;
1872 	pcireg_t preg, memtype;
1873 	uint16_t eeprom_data, apme_mask;
1874 	bool force_clear_smbi;
1875 	uint32_t link_mode;
1876 	uint32_t reg;
1877 
1878 #if defined(WM_DEBUG) && defined(WM_DEBUG_DEFAULT)
1879 	sc->sc_debug = WM_DEBUG_DEFAULT;
1880 #endif
1881 	sc->sc_dev = self;
1882 	callout_init(&sc->sc_tick_ch, WM_CALLOUT_FLAGS);
1883 	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
1884 	sc->sc_core_stopping = false;
1885 
1886 	wmp = wm_lookup(pa);
1887 #ifdef DIAGNOSTIC
1888 	if (wmp == NULL) {
1889 		printf("\n");
1890 		panic("wm_attach: impossible");
1891 	}
1892 #endif
1893 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1894 
1895 	sc->sc_pc = pa->pa_pc;
1896 	sc->sc_pcitag = pa->pa_tag;
1897 
1898 	if (pci_dma64_available(pa))
1899 		sc->sc_dmat = pa->pa_dmat64;
1900 	else
1901 		sc->sc_dmat = pa->pa_dmat;
1902 
1903 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1904 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1905 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1906 
1907 	sc->sc_type = wmp->wmp_type;
1908 
1909 	/* Set default function pointers */
1910 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1911 	sc->phy.release = sc->nvm.release = wm_put_null;
1912 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1913 
1914 	if (sc->sc_type < WM_T_82543) {
1915 		if (sc->sc_rev < 2) {
1916 			aprint_error_dev(sc->sc_dev,
1917 			    "i82542 must be at least rev. 2\n");
1918 			return;
1919 		}
1920 		if (sc->sc_rev < 3)
1921 			sc->sc_type = WM_T_82542_2_0;
1922 	}
1923 
1924 	/*
1925 	 * Disable MSI for Errata:
1926 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1927 	 *
1928 	 *  82544: Errata 25
1929 	 *  82540: Errata  6 (easy to reproduce device timeout)
1930 	 *  82545: Errata  4 (easy to reproduce device timeout)
1931 	 *  82546: Errata 26 (easy to reproduce device timeout)
1932 	 *  82541: Errata  7 (easy to reproduce device timeout)
1933 	 *
1934 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1935 	 *
1936 	 *  82571 & 82572: Errata 63
1937 	 */
1938 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1939 	    || (sc->sc_type == WM_T_82572))
1940 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1941 
1942 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1943 	    || (sc->sc_type == WM_T_82580)
1944 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1945 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1946 		sc->sc_flags |= WM_F_NEWQUEUE;
1947 
1948 	/* Set device properties (mactype) */
1949 	dict = device_properties(sc->sc_dev);
1950 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1951 
1952 	/*
1953 	 * Map the device.  All devices support memory-mapped acccess,
1954 	 * and it is really required for normal operation.
1955 	 */
1956 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1957 	switch (memtype) {
1958 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1959 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1960 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1961 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1962 		break;
1963 	default:
1964 		memh_valid = 0;
1965 		break;
1966 	}
1967 
1968 	if (memh_valid) {
1969 		sc->sc_st = memt;
1970 		sc->sc_sh = memh;
1971 		sc->sc_ss = memsize;
1972 	} else {
1973 		aprint_error_dev(sc->sc_dev,
1974 		    "unable to map device registers\n");
1975 		return;
1976 	}
1977 
1978 	/*
1979 	 * In addition, i82544 and later support I/O mapped indirect
1980 	 * register access.  It is not desirable (nor supported in
1981 	 * this driver) to use it for normal operation, though it is
1982 	 * required to work around bugs in some chip versions.
1983 	 */
1984 	if (sc->sc_type >= WM_T_82544) {
1985 		/* First we have to find the I/O BAR. */
1986 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1987 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1988 			if (memtype == PCI_MAPREG_TYPE_IO)
1989 				break;
1990 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1991 			    PCI_MAPREG_MEM_TYPE_64BIT)
1992 				i += 4;	/* skip high bits, too */
1993 		}
1994 		if (i < PCI_MAPREG_END) {
1995 			/*
1996 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1997 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1998 			 * It's no problem because newer chips has no this
1999 			 * bug.
2000 			 *
2001 			 * The i8254x doesn't apparently respond when the
2002 			 * I/O BAR is 0, which looks somewhat like it's not
2003 			 * been configured.
2004 			 */
2005 			preg = pci_conf_read(pc, pa->pa_tag, i);
2006 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
2007 				aprint_error_dev(sc->sc_dev,
2008 				    "WARNING: I/O BAR at zero.\n");
2009 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
2010 					0, &sc->sc_iot, &sc->sc_ioh,
2011 					NULL, &sc->sc_ios) == 0) {
2012 				sc->sc_flags |= WM_F_IOH_VALID;
2013 			} else
2014 				aprint_error_dev(sc->sc_dev,
2015 				    "WARNING: unable to map I/O space\n");
2016 		}
2017 
2018 	}
2019 
2020 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
2021 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2022 	preg |= PCI_COMMAND_MASTER_ENABLE;
2023 	if (sc->sc_type < WM_T_82542_2_1)
2024 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
2025 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
2026 
2027 	/* Power up chip */
2028 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
2029 	    && error != EOPNOTSUPP) {
2030 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
2031 		return;
2032 	}
2033 
2034 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
2035 	/*
2036 	 *  Don't use MSI-X if we can use only one queue to save interrupt
2037 	 * resource.
2038 	 */
2039 	if (sc->sc_nqueues > 1) {
2040 		max_type = PCI_INTR_TYPE_MSIX;
2041 		/*
2042 		 *  82583 has a MSI-X capability in the PCI configuration space
2043 		 * but it doesn't support it. At least the document doesn't
2044 		 * say anything about MSI-X.
2045 		 */
2046 		counts[PCI_INTR_TYPE_MSIX]
2047 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
2048 	} else {
2049 		max_type = PCI_INTR_TYPE_MSI;
2050 		counts[PCI_INTR_TYPE_MSIX] = 0;
2051 	}
2052 
2053 	/* Allocation settings */
2054 	counts[PCI_INTR_TYPE_MSI] = 1;
2055 	counts[PCI_INTR_TYPE_INTX] = 1;
2056 	/* overridden by disable flags */
2057 	if (wm_disable_msi != 0) {
2058 		counts[PCI_INTR_TYPE_MSI] = 0;
2059 		if (wm_disable_msix != 0) {
2060 			max_type = PCI_INTR_TYPE_INTX;
2061 			counts[PCI_INTR_TYPE_MSIX] = 0;
2062 		}
2063 	} else if (wm_disable_msix != 0) {
2064 		max_type = PCI_INTR_TYPE_MSI;
2065 		counts[PCI_INTR_TYPE_MSIX] = 0;
2066 	}
2067 
2068 alloc_retry:
2069 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
2070 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
2071 		return;
2072 	}
2073 
2074 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
2075 		error = wm_setup_msix(sc);
2076 		if (error) {
2077 			pci_intr_release(pc, sc->sc_intrs,
2078 			    counts[PCI_INTR_TYPE_MSIX]);
2079 
2080 			/* Setup for MSI: Disable MSI-X */
2081 			max_type = PCI_INTR_TYPE_MSI;
2082 			counts[PCI_INTR_TYPE_MSI] = 1;
2083 			counts[PCI_INTR_TYPE_INTX] = 1;
2084 			goto alloc_retry;
2085 		}
2086 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
2087 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2088 		error = wm_setup_legacy(sc);
2089 		if (error) {
2090 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2091 			    counts[PCI_INTR_TYPE_MSI]);
2092 
2093 			/* The next try is for INTx: Disable MSI */
2094 			max_type = PCI_INTR_TYPE_INTX;
2095 			counts[PCI_INTR_TYPE_INTX] = 1;
2096 			goto alloc_retry;
2097 		}
2098 	} else {
2099 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
2100 		error = wm_setup_legacy(sc);
2101 		if (error) {
2102 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
2103 			    counts[PCI_INTR_TYPE_INTX]);
2104 			return;
2105 		}
2106 	}
2107 
2108 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
2109 	error = workqueue_create(&sc->sc_queue_wq, wqname,
2110 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
2111 	    WM_WORKQUEUE_FLAGS);
2112 	if (error) {
2113 		aprint_error_dev(sc->sc_dev,
2114 		    "unable to create workqueue\n");
2115 		goto out;
2116 	}
2117 
2118 	/*
2119 	 * Check the function ID (unit number of the chip).
2120 	 */
2121 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
2122 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
2123 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2124 	    || (sc->sc_type == WM_T_82580)
2125 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
2126 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
2127 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
2128 	else
2129 		sc->sc_funcid = 0;
2130 
2131 	/*
2132 	 * Determine a few things about the bus we're connected to.
2133 	 */
2134 	if (sc->sc_type < WM_T_82543) {
2135 		/* We don't really know the bus characteristics here. */
2136 		sc->sc_bus_speed = 33;
2137 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
2138 		/*
2139 		 * CSA (Communication Streaming Architecture) is about as fast
2140 		 * a 32-bit 66MHz PCI Bus.
2141 		 */
2142 		sc->sc_flags |= WM_F_CSA;
2143 		sc->sc_bus_speed = 66;
2144 		aprint_verbose_dev(sc->sc_dev,
2145 		    "Communication Streaming Architecture\n");
2146 		if (sc->sc_type == WM_T_82547) {
2147 			callout_init(&sc->sc_txfifo_ch, WM_CALLOUT_FLAGS);
2148 			callout_setfunc(&sc->sc_txfifo_ch,
2149 			    wm_82547_txfifo_stall, sc);
2150 			aprint_verbose_dev(sc->sc_dev,
2151 			    "using 82547 Tx FIFO stall work-around\n");
2152 		}
2153 	} else if (sc->sc_type >= WM_T_82571) {
2154 		sc->sc_flags |= WM_F_PCIE;
2155 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
2156 		    && (sc->sc_type != WM_T_ICH10)
2157 		    && (sc->sc_type != WM_T_PCH)
2158 		    && (sc->sc_type != WM_T_PCH2)
2159 		    && (sc->sc_type != WM_T_PCH_LPT)
2160 		    && (sc->sc_type != WM_T_PCH_SPT)
2161 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2162 			/* ICH* and PCH* have no PCIe capability registers */
2163 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2164 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2165 				NULL) == 0)
2166 				aprint_error_dev(sc->sc_dev,
2167 				    "unable to find PCIe capability\n");
2168 		}
2169 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2170 	} else {
2171 		reg = CSR_READ(sc, WMREG_STATUS);
2172 		if (reg & STATUS_BUS64)
2173 			sc->sc_flags |= WM_F_BUS64;
2174 		if ((reg & STATUS_PCIX_MODE) != 0) {
2175 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2176 
2177 			sc->sc_flags |= WM_F_PCIX;
2178 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2179 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2180 				aprint_error_dev(sc->sc_dev,
2181 				    "unable to find PCIX capability\n");
2182 			else if (sc->sc_type != WM_T_82545_3 &&
2183 				 sc->sc_type != WM_T_82546_3) {
2184 				/*
2185 				 * Work around a problem caused by the BIOS
2186 				 * setting the max memory read byte count
2187 				 * incorrectly.
2188 				 */
2189 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2190 				    sc->sc_pcixe_capoff + PCIX_CMD);
2191 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2192 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2193 
2194 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2195 				    PCIX_CMD_BYTECNT_SHIFT;
2196 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2197 				    PCIX_STATUS_MAXB_SHIFT;
2198 				if (bytecnt > maxb) {
2199 					aprint_verbose_dev(sc->sc_dev,
2200 					    "resetting PCI-X MMRBC: %d -> %d\n",
2201 					    512 << bytecnt, 512 << maxb);
2202 					pcix_cmd = (pcix_cmd &
2203 					    ~PCIX_CMD_BYTECNT_MASK) |
2204 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2205 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2206 					    sc->sc_pcixe_capoff + PCIX_CMD,
2207 					    pcix_cmd);
2208 				}
2209 			}
2210 		}
2211 		/*
2212 		 * The quad port adapter is special; it has a PCIX-PCIX
2213 		 * bridge on the board, and can run the secondary bus at
2214 		 * a higher speed.
2215 		 */
2216 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2217 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2218 								      : 66;
2219 		} else if (sc->sc_flags & WM_F_PCIX) {
2220 			switch (reg & STATUS_PCIXSPD_MASK) {
2221 			case STATUS_PCIXSPD_50_66:
2222 				sc->sc_bus_speed = 66;
2223 				break;
2224 			case STATUS_PCIXSPD_66_100:
2225 				sc->sc_bus_speed = 100;
2226 				break;
2227 			case STATUS_PCIXSPD_100_133:
2228 				sc->sc_bus_speed = 133;
2229 				break;
2230 			default:
2231 				aprint_error_dev(sc->sc_dev,
2232 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2233 				    reg & STATUS_PCIXSPD_MASK);
2234 				sc->sc_bus_speed = 66;
2235 				break;
2236 			}
2237 		} else
2238 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2239 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2240 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2241 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2242 	}
2243 
2244 	/* clear interesting stat counters */
2245 	CSR_READ(sc, WMREG_COLC);
2246 	CSR_READ(sc, WMREG_RXERRC);
2247 
2248 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2249 	    || (sc->sc_type >= WM_T_ICH8))
2250 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2251 	if (sc->sc_type >= WM_T_ICH8)
2252 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2253 
2254 	/* Set PHY, NVM mutex related stuff */
2255 	switch (sc->sc_type) {
2256 	case WM_T_82542_2_0:
2257 	case WM_T_82542_2_1:
2258 	case WM_T_82543:
2259 	case WM_T_82544:
2260 		/* Microwire */
2261 		sc->nvm.read = wm_nvm_read_uwire;
2262 		sc->sc_nvm_wordsize = 64;
2263 		sc->sc_nvm_addrbits = 6;
2264 		break;
2265 	case WM_T_82540:
2266 	case WM_T_82545:
2267 	case WM_T_82545_3:
2268 	case WM_T_82546:
2269 	case WM_T_82546_3:
2270 		/* Microwire */
2271 		sc->nvm.read = wm_nvm_read_uwire;
2272 		reg = CSR_READ(sc, WMREG_EECD);
2273 		if (reg & EECD_EE_SIZE) {
2274 			sc->sc_nvm_wordsize = 256;
2275 			sc->sc_nvm_addrbits = 8;
2276 		} else {
2277 			sc->sc_nvm_wordsize = 64;
2278 			sc->sc_nvm_addrbits = 6;
2279 		}
2280 		sc->sc_flags |= WM_F_LOCK_EECD;
2281 		sc->nvm.acquire = wm_get_eecd;
2282 		sc->nvm.release = wm_put_eecd;
2283 		break;
2284 	case WM_T_82541:
2285 	case WM_T_82541_2:
2286 	case WM_T_82547:
2287 	case WM_T_82547_2:
2288 		reg = CSR_READ(sc, WMREG_EECD);
2289 		/*
2290 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
2291 		 * on 8254[17], so set flags and functios before calling it.
2292 		 */
2293 		sc->sc_flags |= WM_F_LOCK_EECD;
2294 		sc->nvm.acquire = wm_get_eecd;
2295 		sc->nvm.release = wm_put_eecd;
2296 		if (reg & EECD_EE_TYPE) {
2297 			/* SPI */
2298 			sc->nvm.read = wm_nvm_read_spi;
2299 			sc->sc_flags |= WM_F_EEPROM_SPI;
2300 			wm_nvm_set_addrbits_size_eecd(sc);
2301 		} else {
2302 			/* Microwire */
2303 			sc->nvm.read = wm_nvm_read_uwire;
2304 			if ((reg & EECD_EE_ABITS) != 0) {
2305 				sc->sc_nvm_wordsize = 256;
2306 				sc->sc_nvm_addrbits = 8;
2307 			} else {
2308 				sc->sc_nvm_wordsize = 64;
2309 				sc->sc_nvm_addrbits = 6;
2310 			}
2311 		}
2312 		break;
2313 	case WM_T_82571:
2314 	case WM_T_82572:
2315 		/* SPI */
2316 		sc->nvm.read = wm_nvm_read_eerd;
2317 		/* Not use WM_F_LOCK_EECD because we use EERD */
2318 		sc->sc_flags |= WM_F_EEPROM_SPI;
2319 		wm_nvm_set_addrbits_size_eecd(sc);
2320 		sc->phy.acquire = wm_get_swsm_semaphore;
2321 		sc->phy.release = wm_put_swsm_semaphore;
2322 		sc->nvm.acquire = wm_get_nvm_82571;
2323 		sc->nvm.release = wm_put_nvm_82571;
2324 		break;
2325 	case WM_T_82573:
2326 	case WM_T_82574:
2327 	case WM_T_82583:
2328 		sc->nvm.read = wm_nvm_read_eerd;
2329 		/* Not use WM_F_LOCK_EECD because we use EERD */
2330 		if (sc->sc_type == WM_T_82573) {
2331 			sc->phy.acquire = wm_get_swsm_semaphore;
2332 			sc->phy.release = wm_put_swsm_semaphore;
2333 			sc->nvm.acquire = wm_get_nvm_82571;
2334 			sc->nvm.release = wm_put_nvm_82571;
2335 		} else {
2336 			/* Both PHY and NVM use the same semaphore. */
2337 			sc->phy.acquire = sc->nvm.acquire
2338 			    = wm_get_swfwhw_semaphore;
2339 			sc->phy.release = sc->nvm.release
2340 			    = wm_put_swfwhw_semaphore;
2341 		}
2342 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2343 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2344 			sc->sc_nvm_wordsize = 2048;
2345 		} else {
2346 			/* SPI */
2347 			sc->sc_flags |= WM_F_EEPROM_SPI;
2348 			wm_nvm_set_addrbits_size_eecd(sc);
2349 		}
2350 		break;
2351 	case WM_T_82575:
2352 	case WM_T_82576:
2353 	case WM_T_82580:
2354 	case WM_T_I350:
2355 	case WM_T_I354:
2356 	case WM_T_80003:
2357 		/* SPI */
2358 		sc->sc_flags |= WM_F_EEPROM_SPI;
2359 		wm_nvm_set_addrbits_size_eecd(sc);
2360 		if ((sc->sc_type == WM_T_80003)
2361 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2362 			sc->nvm.read = wm_nvm_read_eerd;
2363 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2364 		} else {
2365 			sc->nvm.read = wm_nvm_read_spi;
2366 			sc->sc_flags |= WM_F_LOCK_EECD;
2367 		}
2368 		sc->phy.acquire = wm_get_phy_82575;
2369 		sc->phy.release = wm_put_phy_82575;
2370 		sc->nvm.acquire = wm_get_nvm_80003;
2371 		sc->nvm.release = wm_put_nvm_80003;
2372 		break;
2373 	case WM_T_ICH8:
2374 	case WM_T_ICH9:
2375 	case WM_T_ICH10:
2376 	case WM_T_PCH:
2377 	case WM_T_PCH2:
2378 	case WM_T_PCH_LPT:
2379 		sc->nvm.read = wm_nvm_read_ich8;
2380 		/* FLASH */
2381 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2382 		sc->sc_nvm_wordsize = 2048;
2383 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2384 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2385 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2386 			aprint_error_dev(sc->sc_dev,
2387 			    "can't map FLASH registers\n");
2388 			goto out;
2389 		}
2390 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2391 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2392 		    ICH_FLASH_SECTOR_SIZE;
2393 		sc->sc_ich8_flash_bank_size =
2394 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2395 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2396 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2397 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2398 		sc->sc_flashreg_offset = 0;
2399 		sc->phy.acquire = wm_get_swflag_ich8lan;
2400 		sc->phy.release = wm_put_swflag_ich8lan;
2401 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2402 		sc->nvm.release = wm_put_nvm_ich8lan;
2403 		break;
2404 	case WM_T_PCH_SPT:
2405 	case WM_T_PCH_CNP:
2406 		sc->nvm.read = wm_nvm_read_spt;
2407 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2408 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2409 		sc->sc_flasht = sc->sc_st;
2410 		sc->sc_flashh = sc->sc_sh;
2411 		sc->sc_ich8_flash_base = 0;
2412 		sc->sc_nvm_wordsize =
2413 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2414 		    * NVM_SIZE_MULTIPLIER;
2415 		/* It is size in bytes, we want words */
2416 		sc->sc_nvm_wordsize /= 2;
2417 		/* Assume 2 banks */
2418 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2419 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2420 		sc->phy.acquire = wm_get_swflag_ich8lan;
2421 		sc->phy.release = wm_put_swflag_ich8lan;
2422 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2423 		sc->nvm.release = wm_put_nvm_ich8lan;
2424 		break;
2425 	case WM_T_I210:
2426 	case WM_T_I211:
2427 		/* Allow a single clear of the SW semaphore on I210 and newer*/
2428 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2429 		if (wm_nvm_flash_presence_i210(sc)) {
2430 			sc->nvm.read = wm_nvm_read_eerd;
2431 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2432 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2433 			wm_nvm_set_addrbits_size_eecd(sc);
2434 		} else {
2435 			sc->nvm.read = wm_nvm_read_invm;
2436 			sc->sc_flags |= WM_F_EEPROM_INVM;
2437 			sc->sc_nvm_wordsize = INVM_SIZE;
2438 		}
2439 		sc->phy.acquire = wm_get_phy_82575;
2440 		sc->phy.release = wm_put_phy_82575;
2441 		sc->nvm.acquire = wm_get_nvm_80003;
2442 		sc->nvm.release = wm_put_nvm_80003;
2443 		break;
2444 	default:
2445 		break;
2446 	}
2447 
2448 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2449 	switch (sc->sc_type) {
2450 	case WM_T_82571:
2451 	case WM_T_82572:
2452 		reg = CSR_READ(sc, WMREG_SWSM2);
2453 		if ((reg & SWSM2_LOCK) == 0) {
2454 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2455 			force_clear_smbi = true;
2456 		} else
2457 			force_clear_smbi = false;
2458 		break;
2459 	case WM_T_82573:
2460 	case WM_T_82574:
2461 	case WM_T_82583:
2462 		force_clear_smbi = true;
2463 		break;
2464 	default:
2465 		force_clear_smbi = false;
2466 		break;
2467 	}
2468 	if (force_clear_smbi) {
2469 		reg = CSR_READ(sc, WMREG_SWSM);
2470 		if ((reg & SWSM_SMBI) != 0)
2471 			aprint_error_dev(sc->sc_dev,
2472 			    "Please update the Bootagent\n");
2473 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2474 	}
2475 
2476 	/*
2477 	 * Defer printing the EEPROM type until after verifying the checksum
2478 	 * This allows the EEPROM type to be printed correctly in the case
2479 	 * that no EEPROM is attached.
2480 	 */
2481 	/*
2482 	 * Validate the EEPROM checksum. If the checksum fails, flag
2483 	 * this for later, so we can fail future reads from the EEPROM.
2484 	 */
2485 	if (wm_nvm_validate_checksum(sc)) {
2486 		/*
2487 		 * Read twice again because some PCI-e parts fail the
2488 		 * first check due to the link being in sleep state.
2489 		 */
2490 		if (wm_nvm_validate_checksum(sc))
2491 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2492 	}
2493 
2494 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2495 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2496 	else {
2497 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2498 		    sc->sc_nvm_wordsize);
2499 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2500 			aprint_verbose("iNVM");
2501 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2502 			aprint_verbose("FLASH(HW)");
2503 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2504 			aprint_verbose("FLASH");
2505 		else {
2506 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2507 				eetype = "SPI";
2508 			else
2509 				eetype = "MicroWire";
2510 			aprint_verbose("(%d address bits) %s EEPROM",
2511 			    sc->sc_nvm_addrbits, eetype);
2512 		}
2513 	}
2514 	wm_nvm_version(sc);
2515 	aprint_verbose("\n");
2516 
2517 	/*
2518 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
2519 	 * incorrect.
2520 	 */
2521 	wm_gmii_setup_phytype(sc, 0, 0);
2522 
2523 	/* Check for WM_F_WOL on some chips before wm_reset() */
2524 	switch (sc->sc_type) {
2525 	case WM_T_ICH8:
2526 	case WM_T_ICH9:
2527 	case WM_T_ICH10:
2528 	case WM_T_PCH:
2529 	case WM_T_PCH2:
2530 	case WM_T_PCH_LPT:
2531 	case WM_T_PCH_SPT:
2532 	case WM_T_PCH_CNP:
2533 		apme_mask = WUC_APME;
2534 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2535 		if ((eeprom_data & apme_mask) != 0)
2536 			sc->sc_flags |= WM_F_WOL;
2537 		break;
2538 	default:
2539 		break;
2540 	}
2541 
2542 	/* Reset the chip to a known state. */
2543 	wm_reset(sc);
2544 
2545 	/*
2546 	 * Check for I21[01] PLL workaround.
2547 	 *
2548 	 * Three cases:
2549 	 * a) Chip is I211.
2550 	 * b) Chip is I210 and it uses INVM (not FLASH).
2551 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2552 	 */
2553 	if (sc->sc_type == WM_T_I211)
2554 		sc->sc_flags |= WM_F_PLL_WA_I210;
2555 	if (sc->sc_type == WM_T_I210) {
2556 		if (!wm_nvm_flash_presence_i210(sc))
2557 			sc->sc_flags |= WM_F_PLL_WA_I210;
2558 		else if ((sc->sc_nvm_ver_major < 3)
2559 		    || ((sc->sc_nvm_ver_major == 3)
2560 			&& (sc->sc_nvm_ver_minor < 25))) {
2561 			aprint_verbose_dev(sc->sc_dev,
2562 			    "ROM image version %d.%d is older than 3.25\n",
2563 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2564 			sc->sc_flags |= WM_F_PLL_WA_I210;
2565 		}
2566 	}
2567 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2568 		wm_pll_workaround_i210(sc);
2569 
2570 	wm_get_wakeup(sc);
2571 
2572 	/* Non-AMT based hardware can now take control from firmware */
2573 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2574 		wm_get_hw_control(sc);
2575 
2576 	/*
2577 	 * Read the Ethernet address from the EEPROM, if not first found
2578 	 * in device properties.
2579 	 */
2580 	ea = prop_dictionary_get(dict, "mac-address");
2581 	if (ea != NULL) {
2582 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2583 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2584 		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
2585 	} else {
2586 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2587 			aprint_error_dev(sc->sc_dev,
2588 			    "unable to read Ethernet address\n");
2589 			goto out;
2590 		}
2591 	}
2592 
2593 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2594 	    ether_sprintf(enaddr));
2595 
2596 	/*
2597 	 * Read the config info from the EEPROM, and set up various
2598 	 * bits in the control registers based on their contents.
2599 	 */
2600 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2601 	if (pn != NULL) {
2602 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2603 		cfg1 = (uint16_t) prop_number_signed_value(pn);
2604 	} else {
2605 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2606 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2607 			goto out;
2608 		}
2609 	}
2610 
2611 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2612 	if (pn != NULL) {
2613 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2614 		cfg2 = (uint16_t) prop_number_signed_value(pn);
2615 	} else {
2616 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2617 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2618 			goto out;
2619 		}
2620 	}
2621 
2622 	/* check for WM_F_WOL */
2623 	switch (sc->sc_type) {
2624 	case WM_T_82542_2_0:
2625 	case WM_T_82542_2_1:
2626 	case WM_T_82543:
2627 		/* dummy? */
2628 		eeprom_data = 0;
2629 		apme_mask = NVM_CFG3_APME;
2630 		break;
2631 	case WM_T_82544:
2632 		apme_mask = NVM_CFG2_82544_APM_EN;
2633 		eeprom_data = cfg2;
2634 		break;
2635 	case WM_T_82546:
2636 	case WM_T_82546_3:
2637 	case WM_T_82571:
2638 	case WM_T_82572:
2639 	case WM_T_82573:
2640 	case WM_T_82574:
2641 	case WM_T_82583:
2642 	case WM_T_80003:
2643 	case WM_T_82575:
2644 	case WM_T_82576:
2645 		apme_mask = NVM_CFG3_APME;
2646 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2647 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2648 		break;
2649 	case WM_T_82580:
2650 	case WM_T_I350:
2651 	case WM_T_I354:
2652 	case WM_T_I210:
2653 	case WM_T_I211:
2654 		apme_mask = NVM_CFG3_APME;
2655 		wm_nvm_read(sc,
2656 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2657 		    1, &eeprom_data);
2658 		break;
2659 	case WM_T_ICH8:
2660 	case WM_T_ICH9:
2661 	case WM_T_ICH10:
2662 	case WM_T_PCH:
2663 	case WM_T_PCH2:
2664 	case WM_T_PCH_LPT:
2665 	case WM_T_PCH_SPT:
2666 	case WM_T_PCH_CNP:
2667 		/* Already checked before wm_reset () */
2668 		apme_mask = eeprom_data = 0;
2669 		break;
2670 	default: /* XXX 82540 */
2671 		apme_mask = NVM_CFG3_APME;
2672 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2673 		break;
2674 	}
2675 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2676 	if ((eeprom_data & apme_mask) != 0)
2677 		sc->sc_flags |= WM_F_WOL;
2678 
2679 	/*
2680 	 * We have the eeprom settings, now apply the special cases
2681 	 * where the eeprom may be wrong or the board won't support
2682 	 * wake on lan on a particular port
2683 	 */
2684 	switch (sc->sc_pcidevid) {
2685 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
2686 		sc->sc_flags &= ~WM_F_WOL;
2687 		break;
2688 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
2689 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
2690 		/* Wake events only supported on port A for dual fiber
2691 		 * regardless of eeprom setting */
2692 		if (sc->sc_funcid == 1)
2693 			sc->sc_flags &= ~WM_F_WOL;
2694 		break;
2695 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
2696 		/* If quad port adapter, disable WoL on all but port A */
2697 		if (sc->sc_funcid != 0)
2698 			sc->sc_flags &= ~WM_F_WOL;
2699 		break;
2700 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
2701 		/* Wake events only supported on port A for dual fiber
2702 		 * regardless of eeprom setting */
2703 		if (sc->sc_funcid == 1)
2704 			sc->sc_flags &= ~WM_F_WOL;
2705 		break;
2706 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
2707 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
2708 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
2709 		/* If quad port adapter, disable WoL on all but port A */
2710 		if (sc->sc_funcid != 0)
2711 			sc->sc_flags &= ~WM_F_WOL;
2712 		break;
2713 	}
2714 
2715 	if (sc->sc_type >= WM_T_82575) {
2716 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2717 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
2718 			    nvmword);
2719 			if ((sc->sc_type == WM_T_82575) ||
2720 			    (sc->sc_type == WM_T_82576)) {
2721 				/* Check NVM for autonegotiation */
2722 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
2723 				    != 0)
2724 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2725 			}
2726 			if ((sc->sc_type == WM_T_82575) ||
2727 			    (sc->sc_type == WM_T_I350)) {
2728 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
2729 					sc->sc_flags |= WM_F_MAS;
2730 			}
2731 		}
2732 	}
2733 
2734 	/*
2735 	 * XXX need special handling for some multiple port cards
2736 	 * to disable a paticular port.
2737 	 */
2738 
2739 	if (sc->sc_type >= WM_T_82544) {
2740 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2741 		if (pn != NULL) {
2742 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2743 			swdpin = (uint16_t) prop_number_signed_value(pn);
2744 		} else {
2745 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2746 				aprint_error_dev(sc->sc_dev,
2747 				    "unable to read SWDPIN\n");
2748 				goto out;
2749 			}
2750 		}
2751 	}
2752 
2753 	if (cfg1 & NVM_CFG1_ILOS)
2754 		sc->sc_ctrl |= CTRL_ILOS;
2755 
2756 	/*
2757 	 * XXX
2758 	 * This code isn't correct because pin 2 and 3 are located
2759 	 * in different position on newer chips. Check all datasheet.
2760 	 *
2761 	 * Until resolve this problem, check if a chip < 82580
2762 	 */
2763 	if (sc->sc_type <= WM_T_82580) {
2764 		if (sc->sc_type >= WM_T_82544) {
2765 			sc->sc_ctrl |=
2766 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2767 			    CTRL_SWDPIO_SHIFT;
2768 			sc->sc_ctrl |=
2769 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2770 			    CTRL_SWDPINS_SHIFT;
2771 		} else {
2772 			sc->sc_ctrl |=
2773 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2774 			    CTRL_SWDPIO_SHIFT;
2775 		}
2776 	}
2777 
2778 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
2779 		wm_nvm_read(sc,
2780 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
2781 		    1, &nvmword);
2782 		if (nvmword & NVM_CFG3_ILOS)
2783 			sc->sc_ctrl |= CTRL_ILOS;
2784 	}
2785 
2786 #if 0
2787 	if (sc->sc_type >= WM_T_82544) {
2788 		if (cfg1 & NVM_CFG1_IPS0)
2789 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2790 		if (cfg1 & NVM_CFG1_IPS1)
2791 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2792 		sc->sc_ctrl_ext |=
2793 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2794 		    CTRL_EXT_SWDPIO_SHIFT;
2795 		sc->sc_ctrl_ext |=
2796 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2797 		    CTRL_EXT_SWDPINS_SHIFT;
2798 	} else {
2799 		sc->sc_ctrl_ext |=
2800 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2801 		    CTRL_EXT_SWDPIO_SHIFT;
2802 	}
2803 #endif
2804 
2805 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2806 #if 0
2807 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2808 #endif
2809 
2810 	if (sc->sc_type == WM_T_PCH) {
2811 		uint16_t val;
2812 
2813 		/* Save the NVM K1 bit setting */
2814 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2815 
2816 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2817 			sc->sc_nvm_k1_enabled = 1;
2818 		else
2819 			sc->sc_nvm_k1_enabled = 0;
2820 	}
2821 
2822 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2823 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2824 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2825 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2826 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2827 	    || sc->sc_type == WM_T_82573
2828 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2829 		/* Copper only */
2830 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2831 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2832 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2833 	    || (sc->sc_type ==WM_T_I211)) {
2834 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2835 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2836 		switch (link_mode) {
2837 		case CTRL_EXT_LINK_MODE_1000KX:
2838 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
2839 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2840 			break;
2841 		case CTRL_EXT_LINK_MODE_SGMII:
2842 			if (wm_sgmii_uses_mdio(sc)) {
2843 				aprint_normal_dev(sc->sc_dev,
2844 				    "SGMII(MDIO)\n");
2845 				sc->sc_flags |= WM_F_SGMII;
2846 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2847 				break;
2848 			}
2849 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2850 			/*FALLTHROUGH*/
2851 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2852 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2853 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2854 				if (link_mode
2855 				    == CTRL_EXT_LINK_MODE_SGMII) {
2856 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2857 					sc->sc_flags |= WM_F_SGMII;
2858 					aprint_verbose_dev(sc->sc_dev,
2859 					    "SGMII\n");
2860 				} else {
2861 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2862 					aprint_verbose_dev(sc->sc_dev,
2863 					    "SERDES\n");
2864 				}
2865 				break;
2866 			}
2867 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2868 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
2869 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2870 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
2871 				sc->sc_flags |= WM_F_SGMII;
2872 			}
2873 			/* Do not change link mode for 100BaseFX */
2874 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
2875 				break;
2876 
2877 			/* Change current link mode setting */
2878 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2879 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2880 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2881 			else
2882 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2883 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2884 			break;
2885 		case CTRL_EXT_LINK_MODE_GMII:
2886 		default:
2887 			aprint_normal_dev(sc->sc_dev, "Copper\n");
2888 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2889 			break;
2890 		}
2891 
2892 		reg &= ~CTRL_EXT_I2C_ENA;
2893 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2894 			reg |= CTRL_EXT_I2C_ENA;
2895 		else
2896 			reg &= ~CTRL_EXT_I2C_ENA;
2897 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2898 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
2899 			if (!wm_sgmii_uses_mdio(sc))
2900 				wm_gmii_setup_phytype(sc, 0, 0);
2901 			wm_reset_mdicnfg_82580(sc);
2902 		}
2903 	} else if (sc->sc_type < WM_T_82543 ||
2904 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2905 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2906 			aprint_error_dev(sc->sc_dev,
2907 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2908 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2909 		}
2910 	} else {
2911 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2912 			aprint_error_dev(sc->sc_dev,
2913 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2914 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2915 		}
2916 	}
2917 
2918 	if (sc->sc_type >= WM_T_PCH2)
2919 		sc->sc_flags |= WM_F_EEE;
2920 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
2921 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
2922 		/* XXX: Need special handling for I354. (not yet) */
2923 		if (sc->sc_type != WM_T_I354)
2924 			sc->sc_flags |= WM_F_EEE;
2925 	}
2926 
2927 	/*
2928 	 * The I350 has a bug where it always strips the CRC whether
2929 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
2930 	 */
2931 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
2932 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
2933 		sc->sc_flags |= WM_F_CRC_STRIP;
2934 
2935 	/* Set device properties (macflags) */
2936 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2937 
2938 	if (sc->sc_flags != 0) {
2939 		snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2940 		aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2941 	}
2942 
2943 #ifdef WM_MPSAFE
2944 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2945 #else
2946 	sc->sc_core_lock = NULL;
2947 #endif
2948 
2949 	/* Initialize the media structures accordingly. */
2950 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2951 		wm_gmii_mediainit(sc, wmp->wmp_product);
2952 	else
2953 		wm_tbi_mediainit(sc); /* All others */
2954 
2955 	ifp = &sc->sc_ethercom.ec_if;
2956 	xname = device_xname(sc->sc_dev);
2957 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2958 	ifp->if_softc = sc;
2959 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2960 #ifdef WM_MPSAFE
2961 	ifp->if_extflags = IFEF_MPSAFE;
2962 #endif
2963 	ifp->if_ioctl = wm_ioctl;
2964 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2965 		ifp->if_start = wm_nq_start;
2966 		/*
2967 		 * When the number of CPUs is one and the controller can use
2968 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
2969 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
2970 		 * and the other is used for link status changing.
2971 		 * In this situation, wm_nq_transmit() is disadvantageous
2972 		 * because of wm_select_txqueue() and pcq(9) overhead.
2973 		 */
2974 		if (wm_is_using_multiqueue(sc))
2975 			ifp->if_transmit = wm_nq_transmit;
2976 	} else {
2977 		ifp->if_start = wm_start;
2978 		/*
2979 		 * wm_transmit() has the same disadvantage as wm_transmit().
2980 		 */
2981 		if (wm_is_using_multiqueue(sc))
2982 			ifp->if_transmit = wm_transmit;
2983 	}
2984 	/* wm(4) doest not use ifp->if_watchdog, use wm_tick as watchdog. */
2985 	ifp->if_init = wm_init;
2986 	ifp->if_stop = wm_stop;
2987 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
2988 	IFQ_SET_READY(&ifp->if_snd);
2989 
2990 	/* Check for jumbo frame */
2991 	switch (sc->sc_type) {
2992 	case WM_T_82573:
2993 		/* XXX limited to 9234 if ASPM is disabled */
2994 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2995 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2996 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2997 		break;
2998 	case WM_T_82571:
2999 	case WM_T_82572:
3000 	case WM_T_82574:
3001 	case WM_T_82583:
3002 	case WM_T_82575:
3003 	case WM_T_82576:
3004 	case WM_T_82580:
3005 	case WM_T_I350:
3006 	case WM_T_I354:
3007 	case WM_T_I210:
3008 	case WM_T_I211:
3009 	case WM_T_80003:
3010 	case WM_T_ICH9:
3011 	case WM_T_ICH10:
3012 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
3013 	case WM_T_PCH_LPT:
3014 	case WM_T_PCH_SPT:
3015 	case WM_T_PCH_CNP:
3016 		/* XXX limited to 9234 */
3017 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3018 		break;
3019 	case WM_T_PCH:
3020 		/* XXX limited to 4096 */
3021 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3022 		break;
3023 	case WM_T_82542_2_0:
3024 	case WM_T_82542_2_1:
3025 	case WM_T_ICH8:
3026 		/* No support for jumbo frame */
3027 		break;
3028 	default:
3029 		/* ETHER_MAX_LEN_JUMBO */
3030 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
3031 		break;
3032 	}
3033 
3034 	/* If we're a i82543 or greater, we can support VLANs. */
3035 	if (sc->sc_type >= WM_T_82543) {
3036 		sc->sc_ethercom.ec_capabilities |=
3037 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
3038 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
3039 	}
3040 
3041 	if ((sc->sc_flags & WM_F_EEE) != 0)
3042 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
3043 
3044 	/*
3045 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
3046 	 * on i82543 and later.
3047 	 */
3048 	if (sc->sc_type >= WM_T_82543) {
3049 		ifp->if_capabilities |=
3050 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
3051 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
3052 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
3053 		    IFCAP_CSUM_TCPv6_Tx |
3054 		    IFCAP_CSUM_UDPv6_Tx;
3055 	}
3056 
3057 	/*
3058 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
3059 	 *
3060 	 *	82541GI (8086:1076) ... no
3061 	 *	82572EI (8086:10b9) ... yes
3062 	 */
3063 	if (sc->sc_type >= WM_T_82571) {
3064 		ifp->if_capabilities |=
3065 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
3066 	}
3067 
3068 	/*
3069 	 * If we're a i82544 or greater (except i82547), we can do
3070 	 * TCP segmentation offload.
3071 	 */
3072 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
3073 		ifp->if_capabilities |= IFCAP_TSOv4;
3074 	}
3075 
3076 	if (sc->sc_type >= WM_T_82571) {
3077 		ifp->if_capabilities |= IFCAP_TSOv6;
3078 	}
3079 
3080 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
3081 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
3082 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
3083 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
3084 
3085 	/* Attach the interface. */
3086 	error = if_initialize(ifp);
3087 	if (error != 0) {
3088 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
3089 		    error);
3090 		return; /* Error */
3091 	}
3092 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
3093 	ether_ifattach(ifp, enaddr);
3094 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
3095 	if_register(ifp);
3096 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
3097 	    RND_FLAG_DEFAULT);
3098 
3099 #ifdef WM_EVENT_COUNTERS
3100 	/* Attach event counters. */
3101 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
3102 	    NULL, xname, "linkintr");
3103 
3104 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
3105 	    NULL, xname, "tx_xoff");
3106 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
3107 	    NULL, xname, "tx_xon");
3108 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
3109 	    NULL, xname, "rx_xoff");
3110 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
3111 	    NULL, xname, "rx_xon");
3112 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
3113 	    NULL, xname, "rx_macctl");
3114 #endif /* WM_EVENT_COUNTERS */
3115 
3116 	sc->sc_txrx_use_workqueue = false;
3117 
3118 	if (wm_phy_need_linkdown_discard(sc))
3119 		wm_set_linkdown_discard(sc);
3120 
3121 	wm_init_sysctls(sc);
3122 
3123 	if (pmf_device_register(self, wm_suspend, wm_resume))
3124 		pmf_class_network_register(self, ifp);
3125 	else
3126 		aprint_error_dev(self, "couldn't establish power handler\n");
3127 
3128 	sc->sc_flags |= WM_F_ATTACHED;
3129 out:
3130 	return;
3131 }
3132 
3133 /* The detach function (ca_detach) */
3134 static int
3135 wm_detach(device_t self, int flags __unused)
3136 {
3137 	struct wm_softc *sc = device_private(self);
3138 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3139 	int i;
3140 
3141 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
3142 		return 0;
3143 
3144 	/* Stop the interface. Callouts are stopped in it. */
3145 	wm_stop(ifp, 1);
3146 
3147 	pmf_device_deregister(self);
3148 
3149 	sysctl_teardown(&sc->sc_sysctllog);
3150 
3151 #ifdef WM_EVENT_COUNTERS
3152 	evcnt_detach(&sc->sc_ev_linkintr);
3153 
3154 	evcnt_detach(&sc->sc_ev_tx_xoff);
3155 	evcnt_detach(&sc->sc_ev_tx_xon);
3156 	evcnt_detach(&sc->sc_ev_rx_xoff);
3157 	evcnt_detach(&sc->sc_ev_rx_xon);
3158 	evcnt_detach(&sc->sc_ev_rx_macctl);
3159 #endif /* WM_EVENT_COUNTERS */
3160 
3161 	rnd_detach_source(&sc->rnd_source);
3162 
3163 	/* Tell the firmware about the release */
3164 	WM_CORE_LOCK(sc);
3165 	wm_release_manageability(sc);
3166 	wm_release_hw_control(sc);
3167 	wm_enable_wakeup(sc);
3168 	WM_CORE_UNLOCK(sc);
3169 
3170 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
3171 
3172 	ether_ifdetach(ifp);
3173 	if_detach(ifp);
3174 	if_percpuq_destroy(sc->sc_ipq);
3175 
3176 	/* Delete all remaining media. */
3177 	ifmedia_fini(&sc->sc_mii.mii_media);
3178 
3179 	/* Unload RX dmamaps and free mbufs */
3180 	for (i = 0; i < sc->sc_nqueues; i++) {
3181 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
3182 		mutex_enter(rxq->rxq_lock);
3183 		wm_rxdrain(rxq);
3184 		mutex_exit(rxq->rxq_lock);
3185 	}
3186 	/* Must unlock here */
3187 
3188 	/* Disestablish the interrupt handler */
3189 	for (i = 0; i < sc->sc_nintrs; i++) {
3190 		if (sc->sc_ihs[i] != NULL) {
3191 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
3192 			sc->sc_ihs[i] = NULL;
3193 		}
3194 	}
3195 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
3196 
3197 	/* wm_stop() ensure workqueue is stopped. */
3198 	workqueue_destroy(sc->sc_queue_wq);
3199 
3200 	for (i = 0; i < sc->sc_nqueues; i++)
3201 		softint_disestablish(sc->sc_queue[i].wmq_si);
3202 
3203 	wm_free_txrx_queues(sc);
3204 
3205 	/* Unmap the registers */
3206 	if (sc->sc_ss) {
3207 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
3208 		sc->sc_ss = 0;
3209 	}
3210 	if (sc->sc_ios) {
3211 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
3212 		sc->sc_ios = 0;
3213 	}
3214 	if (sc->sc_flashs) {
3215 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
3216 		sc->sc_flashs = 0;
3217 	}
3218 
3219 	if (sc->sc_core_lock)
3220 		mutex_obj_free(sc->sc_core_lock);
3221 	if (sc->sc_ich_phymtx)
3222 		mutex_obj_free(sc->sc_ich_phymtx);
3223 	if (sc->sc_ich_nvmmtx)
3224 		mutex_obj_free(sc->sc_ich_nvmmtx);
3225 
3226 	return 0;
3227 }
3228 
3229 static bool
3230 wm_suspend(device_t self, const pmf_qual_t *qual)
3231 {
3232 	struct wm_softc *sc = device_private(self);
3233 
3234 	wm_release_manageability(sc);
3235 	wm_release_hw_control(sc);
3236 	wm_enable_wakeup(sc);
3237 
3238 	return true;
3239 }
3240 
3241 static bool
3242 wm_resume(device_t self, const pmf_qual_t *qual)
3243 {
3244 	struct wm_softc *sc = device_private(self);
3245 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3246 	pcireg_t reg;
3247 	char buf[256];
3248 
3249 	reg = CSR_READ(sc, WMREG_WUS);
3250 	if (reg != 0) {
3251 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
3252 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
3253 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
3254 	}
3255 
3256 	if (sc->sc_type >= WM_T_PCH2)
3257 		wm_resume_workarounds_pchlan(sc);
3258 	if ((ifp->if_flags & IFF_UP) == 0) {
3259 		wm_reset(sc);
3260 		/* Non-AMT based hardware can now take control from firmware */
3261 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
3262 			wm_get_hw_control(sc);
3263 		wm_init_manageability(sc);
3264 	} else {
3265 		/*
3266 		 * We called pmf_class_network_register(), so if_init() is
3267 		 * automatically called when IFF_UP. wm_reset(),
3268 		 * wm_get_hw_control() and wm_init_manageability() are called
3269 		 * via wm_init().
3270 		 */
3271 	}
3272 
3273 	return true;
3274 }
3275 
3276 /*
3277  * wm_watchdog:		[ifnet interface function]
3278  *
3279  *	Watchdog timer handler.
3280  */
3281 static void
3282 wm_watchdog(struct ifnet *ifp)
3283 {
3284 	int qid;
3285 	struct wm_softc *sc = ifp->if_softc;
3286 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
3287 
3288 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
3289 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3290 
3291 		wm_watchdog_txq(ifp, txq, &hang_queue);
3292 	}
3293 
3294 	/* IF any of queues hanged up, reset the interface. */
3295 	if (hang_queue != 0) {
3296 		(void)wm_init(ifp);
3297 
3298 		/*
3299 		 * There are still some upper layer processing which call
3300 		 * ifp->if_start(). e.g. ALTQ or one CPU system
3301 		 */
3302 		/* Try to get more packets going. */
3303 		ifp->if_start(ifp);
3304 	}
3305 }
3306 
3307 
3308 static void
3309 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3310 {
3311 
3312 	mutex_enter(txq->txq_lock);
3313 	if (txq->txq_sending &&
3314 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3315 		wm_watchdog_txq_locked(ifp, txq, hang);
3316 
3317 	mutex_exit(txq->txq_lock);
3318 }
3319 
3320 static void
3321 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3322     uint16_t *hang)
3323 {
3324 	struct wm_softc *sc = ifp->if_softc;
3325 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3326 
3327 	KASSERT(mutex_owned(txq->txq_lock));
3328 
3329 	/*
3330 	 * Since we're using delayed interrupts, sweep up
3331 	 * before we report an error.
3332 	 */
3333 	wm_txeof(txq, UINT_MAX);
3334 
3335 	if (txq->txq_sending)
3336 		*hang |= __BIT(wmq->wmq_id);
3337 
3338 	if (txq->txq_free == WM_NTXDESC(txq)) {
3339 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3340 		    device_xname(sc->sc_dev));
3341 	} else {
3342 #ifdef WM_DEBUG
3343 		int i, j;
3344 		struct wm_txsoft *txs;
3345 #endif
3346 		log(LOG_ERR,
3347 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3348 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3349 		    txq->txq_next);
3350 		if_statinc(ifp, if_oerrors);
3351 #ifdef WM_DEBUG
3352 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3353 		    i = WM_NEXTTXS(txq, i)) {
3354 			txs = &txq->txq_soft[i];
3355 			printf("txs %d tx %d -> %d\n",
3356 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3357 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3358 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3359 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3360 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3361 					printf("\t %#08x%08x\n",
3362 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3363 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3364 				} else {
3365 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3366 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3367 					    txq->txq_descs[j].wtx_addr.wa_low);
3368 					printf("\t %#04x%02x%02x%08x\n",
3369 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
3370 					    txq->txq_descs[j].wtx_fields.wtxu_options,
3371 					    txq->txq_descs[j].wtx_fields.wtxu_status,
3372 					    txq->txq_descs[j].wtx_cmdlen);
3373 				}
3374 				if (j == txs->txs_lastdesc)
3375 					break;
3376 			}
3377 		}
3378 #endif
3379 	}
3380 }
3381 
3382 /*
3383  * wm_tick:
3384  *
3385  *	One second timer, used to check link status, sweep up
3386  *	completed transmit jobs, etc.
3387  */
3388 static void
3389 wm_tick(void *arg)
3390 {
3391 	struct wm_softc *sc = arg;
3392 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3393 #ifndef WM_MPSAFE
3394 	int s = splnet();
3395 #endif
3396 
3397 	WM_CORE_LOCK(sc);
3398 
3399 	if (sc->sc_core_stopping) {
3400 		WM_CORE_UNLOCK(sc);
3401 #ifndef WM_MPSAFE
3402 		splx(s);
3403 #endif
3404 		return;
3405 	}
3406 
3407 	if (sc->sc_type >= WM_T_82542_2_1) {
3408 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3409 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3410 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3411 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3412 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3413 	}
3414 
3415 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3416 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
3417 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
3418 	    + CSR_READ(sc, WMREG_CRCERRS)
3419 	    + CSR_READ(sc, WMREG_ALGNERRC)
3420 	    + CSR_READ(sc, WMREG_SYMERRC)
3421 	    + CSR_READ(sc, WMREG_RXERRC)
3422 	    + CSR_READ(sc, WMREG_SEC)
3423 	    + CSR_READ(sc, WMREG_CEXTERR)
3424 	    + CSR_READ(sc, WMREG_RLEC));
3425 	/*
3426 	 * WMREG_RNBC is incremented when there is no available buffers in host
3427 	 * memory. It does not mean the number of dropped packet. Because
3428 	 * ethernet controller can receive packets in such case if there is
3429 	 * space in phy's FIFO.
3430 	 *
3431 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
3432 	 * own EVCNT instead of if_iqdrops.
3433 	 */
3434 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
3435 	IF_STAT_PUTREF(ifp);
3436 
3437 	if (sc->sc_flags & WM_F_HAS_MII)
3438 		mii_tick(&sc->sc_mii);
3439 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
3440 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3441 		wm_serdes_tick(sc);
3442 	else
3443 		wm_tbi_tick(sc);
3444 
3445 	WM_CORE_UNLOCK(sc);
3446 
3447 	wm_watchdog(ifp);
3448 
3449 	callout_schedule(&sc->sc_tick_ch, hz);
3450 }
3451 
3452 static int
3453 wm_ifflags_cb(struct ethercom *ec)
3454 {
3455 	struct ifnet *ifp = &ec->ec_if;
3456 	struct wm_softc *sc = ifp->if_softc;
3457 	u_short iffchange;
3458 	int ecchange;
3459 	bool needreset = false;
3460 	int rc = 0;
3461 
3462 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3463 		device_xname(sc->sc_dev), __func__));
3464 
3465 	WM_CORE_LOCK(sc);
3466 
3467 	/*
3468 	 * Check for if_flags.
3469 	 * Main usage is to prevent linkdown when opening bpf.
3470 	 */
3471 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
3472 	sc->sc_if_flags = ifp->if_flags;
3473 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3474 		needreset = true;
3475 		goto ec;
3476 	}
3477 
3478 	/* iff related updates */
3479 	if ((iffchange & IFF_PROMISC) != 0)
3480 		wm_set_filter(sc);
3481 
3482 	wm_set_vlan(sc);
3483 
3484 ec:
3485 	/* Check for ec_capenable. */
3486 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
3487 	sc->sc_ec_capenable = ec->ec_capenable;
3488 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
3489 		needreset = true;
3490 		goto out;
3491 	}
3492 
3493 	/* ec related updates */
3494 	wm_set_eee(sc);
3495 
3496 out:
3497 	if (needreset)
3498 		rc = ENETRESET;
3499 	WM_CORE_UNLOCK(sc);
3500 
3501 	return rc;
3502 }
3503 
3504 static bool
3505 wm_phy_need_linkdown_discard(struct wm_softc *sc)
3506 {
3507 
3508 	switch (sc->sc_phytype) {
3509 	case WMPHY_82577: /* ihphy */
3510 	case WMPHY_82578: /* atphy */
3511 	case WMPHY_82579: /* ihphy */
3512 	case WMPHY_I217: /* ihphy */
3513 	case WMPHY_82580: /* ihphy */
3514 	case WMPHY_I350: /* ihphy */
3515 		return true;
3516 	default:
3517 		return false;
3518 	}
3519 }
3520 
3521 static void
3522 wm_set_linkdown_discard(struct wm_softc *sc)
3523 {
3524 
3525 	for (int i = 0; i < sc->sc_nqueues; i++) {
3526 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3527 
3528 		mutex_enter(txq->txq_lock);
3529 		txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
3530 		mutex_exit(txq->txq_lock);
3531 	}
3532 }
3533 
3534 static void
3535 wm_clear_linkdown_discard(struct wm_softc *sc)
3536 {
3537 
3538 	for (int i = 0; i < sc->sc_nqueues; i++) {
3539 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3540 
3541 		mutex_enter(txq->txq_lock);
3542 		txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
3543 		mutex_exit(txq->txq_lock);
3544 	}
3545 }
3546 
3547 /*
3548  * wm_ioctl:		[ifnet interface function]
3549  *
3550  *	Handle control requests from the operator.
3551  */
3552 static int
3553 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3554 {
3555 	struct wm_softc *sc = ifp->if_softc;
3556 	struct ifreq *ifr = (struct ifreq *)data;
3557 	struct ifaddr *ifa = (struct ifaddr *)data;
3558 	struct sockaddr_dl *sdl;
3559 	int s, error;
3560 
3561 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3562 		device_xname(sc->sc_dev), __func__));
3563 
3564 #ifndef WM_MPSAFE
3565 	s = splnet();
3566 #endif
3567 	switch (cmd) {
3568 	case SIOCSIFMEDIA:
3569 		WM_CORE_LOCK(sc);
3570 		/* Flow control requires full-duplex mode. */
3571 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3572 		    (ifr->ifr_media & IFM_FDX) == 0)
3573 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3574 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3575 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3576 				/* We can do both TXPAUSE and RXPAUSE. */
3577 				ifr->ifr_media |=
3578 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3579 			}
3580 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3581 		}
3582 		WM_CORE_UNLOCK(sc);
3583 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3584 		if (error == 0 && wm_phy_need_linkdown_discard(sc)) {
3585 			if (IFM_SUBTYPE(ifr->ifr_media) == IFM_NONE)
3586 				wm_set_linkdown_discard(sc);
3587 			else
3588 				wm_clear_linkdown_discard(sc);
3589 		}
3590 		break;
3591 	case SIOCINITIFADDR:
3592 		WM_CORE_LOCK(sc);
3593 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3594 			sdl = satosdl(ifp->if_dl->ifa_addr);
3595 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3596 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3597 			/* Unicast address is the first multicast entry */
3598 			wm_set_filter(sc);
3599 			error = 0;
3600 			WM_CORE_UNLOCK(sc);
3601 			break;
3602 		}
3603 		WM_CORE_UNLOCK(sc);
3604 		if (((ifp->if_flags & IFF_UP) == 0) && wm_phy_need_linkdown_discard(sc))
3605 			wm_clear_linkdown_discard(sc);
3606 		/*FALLTHROUGH*/
3607 	default:
3608 		if (cmd == SIOCSIFFLAGS && wm_phy_need_linkdown_discard(sc)) {
3609 			if (((ifp->if_flags & IFF_UP) == 0) && ((ifr->ifr_flags & IFF_UP) != 0)) {
3610 				wm_clear_linkdown_discard(sc);
3611 			} else if (((ifp->if_flags & IFF_UP) != 0) && ((ifr->ifr_flags & IFF_UP) == 0)) {
3612 				wm_set_linkdown_discard(sc);
3613 			}
3614 		}
3615 #ifdef WM_MPSAFE
3616 		s = splnet();
3617 #endif
3618 		/* It may call wm_start, so unlock here */
3619 		error = ether_ioctl(ifp, cmd, data);
3620 #ifdef WM_MPSAFE
3621 		splx(s);
3622 #endif
3623 		if (error != ENETRESET)
3624 			break;
3625 
3626 		error = 0;
3627 
3628 		if (cmd == SIOCSIFCAP)
3629 			error = (*ifp->if_init)(ifp);
3630 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3631 			;
3632 		else if (ifp->if_flags & IFF_RUNNING) {
3633 			/*
3634 			 * Multicast list has changed; set the hardware filter
3635 			 * accordingly.
3636 			 */
3637 			WM_CORE_LOCK(sc);
3638 			wm_set_filter(sc);
3639 			WM_CORE_UNLOCK(sc);
3640 		}
3641 		break;
3642 	}
3643 
3644 #ifndef WM_MPSAFE
3645 	splx(s);
3646 #endif
3647 	return error;
3648 }
3649 
3650 /* MAC address related */
3651 
3652 /*
3653  * Get the offset of MAC address and return it.
3654  * If error occured, use offset 0.
3655  */
3656 static uint16_t
3657 wm_check_alt_mac_addr(struct wm_softc *sc)
3658 {
3659 	uint16_t myea[ETHER_ADDR_LEN / 2];
3660 	uint16_t offset = NVM_OFF_MACADDR;
3661 
3662 	/* Try to read alternative MAC address pointer */
3663 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3664 		return 0;
3665 
3666 	/* Check pointer if it's valid or not. */
3667 	if ((offset == 0x0000) || (offset == 0xffff))
3668 		return 0;
3669 
3670 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3671 	/*
3672 	 * Check whether alternative MAC address is valid or not.
3673 	 * Some cards have non 0xffff pointer but those don't use
3674 	 * alternative MAC address in reality.
3675 	 *
3676 	 * Check whether the broadcast bit is set or not.
3677 	 */
3678 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3679 		if (((myea[0] & 0xff) & 0x01) == 0)
3680 			return offset; /* Found */
3681 
3682 	/* Not found */
3683 	return 0;
3684 }
3685 
3686 static int
3687 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3688 {
3689 	uint16_t myea[ETHER_ADDR_LEN / 2];
3690 	uint16_t offset = NVM_OFF_MACADDR;
3691 	int do_invert = 0;
3692 
3693 	switch (sc->sc_type) {
3694 	case WM_T_82580:
3695 	case WM_T_I350:
3696 	case WM_T_I354:
3697 		/* EEPROM Top Level Partitioning */
3698 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3699 		break;
3700 	case WM_T_82571:
3701 	case WM_T_82575:
3702 	case WM_T_82576:
3703 	case WM_T_80003:
3704 	case WM_T_I210:
3705 	case WM_T_I211:
3706 		offset = wm_check_alt_mac_addr(sc);
3707 		if (offset == 0)
3708 			if ((sc->sc_funcid & 0x01) == 1)
3709 				do_invert = 1;
3710 		break;
3711 	default:
3712 		if ((sc->sc_funcid & 0x01) == 1)
3713 			do_invert = 1;
3714 		break;
3715 	}
3716 
3717 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3718 		goto bad;
3719 
3720 	enaddr[0] = myea[0] & 0xff;
3721 	enaddr[1] = myea[0] >> 8;
3722 	enaddr[2] = myea[1] & 0xff;
3723 	enaddr[3] = myea[1] >> 8;
3724 	enaddr[4] = myea[2] & 0xff;
3725 	enaddr[5] = myea[2] >> 8;
3726 
3727 	/*
3728 	 * Toggle the LSB of the MAC address on the second port
3729 	 * of some dual port cards.
3730 	 */
3731 	if (do_invert != 0)
3732 		enaddr[5] ^= 1;
3733 
3734 	return 0;
3735 
3736  bad:
3737 	return -1;
3738 }
3739 
3740 /*
3741  * wm_set_ral:
3742  *
3743  *	Set an entery in the receive address list.
3744  */
3745 static void
3746 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3747 {
3748 	uint32_t ral_lo, ral_hi, addrl, addrh;
3749 	uint32_t wlock_mac;
3750 	int rv;
3751 
3752 	if (enaddr != NULL) {
3753 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
3754 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
3755 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
3756 		ral_hi |= RAL_AV;
3757 	} else {
3758 		ral_lo = 0;
3759 		ral_hi = 0;
3760 	}
3761 
3762 	switch (sc->sc_type) {
3763 	case WM_T_82542_2_0:
3764 	case WM_T_82542_2_1:
3765 	case WM_T_82543:
3766 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3767 		CSR_WRITE_FLUSH(sc);
3768 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3769 		CSR_WRITE_FLUSH(sc);
3770 		break;
3771 	case WM_T_PCH2:
3772 	case WM_T_PCH_LPT:
3773 	case WM_T_PCH_SPT:
3774 	case WM_T_PCH_CNP:
3775 		if (idx == 0) {
3776 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3777 			CSR_WRITE_FLUSH(sc);
3778 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3779 			CSR_WRITE_FLUSH(sc);
3780 			return;
3781 		}
3782 		if (sc->sc_type != WM_T_PCH2) {
3783 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3784 			    FWSM_WLOCK_MAC);
3785 			addrl = WMREG_SHRAL(idx - 1);
3786 			addrh = WMREG_SHRAH(idx - 1);
3787 		} else {
3788 			wlock_mac = 0;
3789 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3790 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3791 		}
3792 
3793 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3794 			rv = wm_get_swflag_ich8lan(sc);
3795 			if (rv != 0)
3796 				return;
3797 			CSR_WRITE(sc, addrl, ral_lo);
3798 			CSR_WRITE_FLUSH(sc);
3799 			CSR_WRITE(sc, addrh, ral_hi);
3800 			CSR_WRITE_FLUSH(sc);
3801 			wm_put_swflag_ich8lan(sc);
3802 		}
3803 
3804 		break;
3805 	default:
3806 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3807 		CSR_WRITE_FLUSH(sc);
3808 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3809 		CSR_WRITE_FLUSH(sc);
3810 		break;
3811 	}
3812 }
3813 
3814 /*
3815  * wm_mchash:
3816  *
3817  *	Compute the hash of the multicast address for the 4096-bit
3818  *	multicast filter.
3819  */
3820 static uint32_t
3821 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3822 {
3823 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3824 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3825 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3826 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3827 	uint32_t hash;
3828 
3829 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3830 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3831 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3832 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3833 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3834 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3835 		return (hash & 0x3ff);
3836 	}
3837 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3838 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3839 
3840 	return (hash & 0xfff);
3841 }
3842 
3843 /*
3844  *
3845  *
3846  */
3847 static int
3848 wm_rar_count(struct wm_softc *sc)
3849 {
3850 	int size;
3851 
3852 	switch (sc->sc_type) {
3853 	case WM_T_ICH8:
3854 		size = WM_RAL_TABSIZE_ICH8 -1;
3855 		break;
3856 	case WM_T_ICH9:
3857 	case WM_T_ICH10:
3858 	case WM_T_PCH:
3859 		size = WM_RAL_TABSIZE_ICH8;
3860 		break;
3861 	case WM_T_PCH2:
3862 		size = WM_RAL_TABSIZE_PCH2;
3863 		break;
3864 	case WM_T_PCH_LPT:
3865 	case WM_T_PCH_SPT:
3866 	case WM_T_PCH_CNP:
3867 		size = WM_RAL_TABSIZE_PCH_LPT;
3868 		break;
3869 	case WM_T_82575:
3870 	case WM_T_I210:
3871 	case WM_T_I211:
3872 		size = WM_RAL_TABSIZE_82575;
3873 		break;
3874 	case WM_T_82576:
3875 	case WM_T_82580:
3876 		size = WM_RAL_TABSIZE_82576;
3877 		break;
3878 	case WM_T_I350:
3879 	case WM_T_I354:
3880 		size = WM_RAL_TABSIZE_I350;
3881 		break;
3882 	default:
3883 		size = WM_RAL_TABSIZE;
3884 	}
3885 
3886 	return size;
3887 }
3888 
3889 /*
3890  * wm_set_filter:
3891  *
3892  *	Set up the receive filter.
3893  */
3894 static void
3895 wm_set_filter(struct wm_softc *sc)
3896 {
3897 	struct ethercom *ec = &sc->sc_ethercom;
3898 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3899 	struct ether_multi *enm;
3900 	struct ether_multistep step;
3901 	bus_addr_t mta_reg;
3902 	uint32_t hash, reg, bit;
3903 	int i, size, ralmax, rv;
3904 
3905 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
3906 		device_xname(sc->sc_dev), __func__));
3907 
3908 	if (sc->sc_type >= WM_T_82544)
3909 		mta_reg = WMREG_CORDOVA_MTA;
3910 	else
3911 		mta_reg = WMREG_MTA;
3912 
3913 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3914 
3915 	if (ifp->if_flags & IFF_BROADCAST)
3916 		sc->sc_rctl |= RCTL_BAM;
3917 	if (ifp->if_flags & IFF_PROMISC) {
3918 		sc->sc_rctl |= RCTL_UPE;
3919 		ETHER_LOCK(ec);
3920 		ec->ec_flags |= ETHER_F_ALLMULTI;
3921 		ETHER_UNLOCK(ec);
3922 		goto allmulti;
3923 	}
3924 
3925 	/*
3926 	 * Set the station address in the first RAL slot, and
3927 	 * clear the remaining slots.
3928 	 */
3929 	size = wm_rar_count(sc);
3930 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3931 
3932 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3933 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3934 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3935 		switch (i) {
3936 		case 0:
3937 			/* We can use all entries */
3938 			ralmax = size;
3939 			break;
3940 		case 1:
3941 			/* Only RAR[0] */
3942 			ralmax = 1;
3943 			break;
3944 		default:
3945 			/* Available SHRA + RAR[0] */
3946 			ralmax = i + 1;
3947 		}
3948 	} else
3949 		ralmax = size;
3950 	for (i = 1; i < size; i++) {
3951 		if (i < ralmax)
3952 			wm_set_ral(sc, NULL, i);
3953 	}
3954 
3955 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3956 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3957 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3958 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3959 		size = WM_ICH8_MC_TABSIZE;
3960 	else
3961 		size = WM_MC_TABSIZE;
3962 	/* Clear out the multicast table. */
3963 	for (i = 0; i < size; i++) {
3964 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3965 		CSR_WRITE_FLUSH(sc);
3966 	}
3967 
3968 	ETHER_LOCK(ec);
3969 	ETHER_FIRST_MULTI(step, ec, enm);
3970 	while (enm != NULL) {
3971 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3972 			ec->ec_flags |= ETHER_F_ALLMULTI;
3973 			ETHER_UNLOCK(ec);
3974 			/*
3975 			 * We must listen to a range of multicast addresses.
3976 			 * For now, just accept all multicasts, rather than
3977 			 * trying to set only those filter bits needed to match
3978 			 * the range.  (At this time, the only use of address
3979 			 * ranges is for IP multicast routing, for which the
3980 			 * range is big enough to require all bits set.)
3981 			 */
3982 			goto allmulti;
3983 		}
3984 
3985 		hash = wm_mchash(sc, enm->enm_addrlo);
3986 
3987 		reg = (hash >> 5);
3988 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3989 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3990 		    || (sc->sc_type == WM_T_PCH2)
3991 		    || (sc->sc_type == WM_T_PCH_LPT)
3992 		    || (sc->sc_type == WM_T_PCH_SPT)
3993 		    || (sc->sc_type == WM_T_PCH_CNP))
3994 			reg &= 0x1f;
3995 		else
3996 			reg &= 0x7f;
3997 		bit = hash & 0x1f;
3998 
3999 		hash = CSR_READ(sc, mta_reg + (reg << 2));
4000 		hash |= 1U << bit;
4001 
4002 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
4003 			/*
4004 			 * 82544 Errata 9: Certain register cannot be written
4005 			 * with particular alignments in PCI-X bus operation
4006 			 * (FCAH, MTA and VFTA).
4007 			 */
4008 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4009 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4010 			CSR_WRITE_FLUSH(sc);
4011 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4012 			CSR_WRITE_FLUSH(sc);
4013 		} else {
4014 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4015 			CSR_WRITE_FLUSH(sc);
4016 		}
4017 
4018 		ETHER_NEXT_MULTI(step, enm);
4019 	}
4020 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
4021 	ETHER_UNLOCK(ec);
4022 
4023 	goto setit;
4024 
4025  allmulti:
4026 	sc->sc_rctl |= RCTL_MPE;
4027 
4028  setit:
4029 	if (sc->sc_type >= WM_T_PCH2) {
4030 		if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4031 		    && (ifp->if_mtu > ETHERMTU))
4032 			rv = wm_lv_jumbo_workaround_ich8lan(sc, true);
4033 		else
4034 			rv = wm_lv_jumbo_workaround_ich8lan(sc, false);
4035 		if (rv != 0)
4036 			device_printf(sc->sc_dev,
4037 			    "Failed to do workaround for jumbo frame.\n");
4038 	}
4039 
4040 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4041 }
4042 
4043 /* Reset and init related */
4044 
4045 static void
4046 wm_set_vlan(struct wm_softc *sc)
4047 {
4048 
4049 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4050 		device_xname(sc->sc_dev), __func__));
4051 
4052 	/* Deal with VLAN enables. */
4053 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4054 		sc->sc_ctrl |= CTRL_VME;
4055 	else
4056 		sc->sc_ctrl &= ~CTRL_VME;
4057 
4058 	/* Write the control registers. */
4059 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4060 }
4061 
4062 static void
4063 wm_set_pcie_completion_timeout(struct wm_softc *sc)
4064 {
4065 	uint32_t gcr;
4066 	pcireg_t ctrl2;
4067 
4068 	gcr = CSR_READ(sc, WMREG_GCR);
4069 
4070 	/* Only take action if timeout value is defaulted to 0 */
4071 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
4072 		goto out;
4073 
4074 	if ((gcr & GCR_CAP_VER2) == 0) {
4075 		gcr |= GCR_CMPL_TMOUT_10MS;
4076 		goto out;
4077 	}
4078 
4079 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
4080 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
4081 	ctrl2 |= WM_PCIE_DCSR2_16MS;
4082 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
4083 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
4084 
4085 out:
4086 	/* Disable completion timeout resend */
4087 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
4088 
4089 	CSR_WRITE(sc, WMREG_GCR, gcr);
4090 }
4091 
4092 void
4093 wm_get_auto_rd_done(struct wm_softc *sc)
4094 {
4095 	int i;
4096 
4097 	/* wait for eeprom to reload */
4098 	switch (sc->sc_type) {
4099 	case WM_T_82571:
4100 	case WM_T_82572:
4101 	case WM_T_82573:
4102 	case WM_T_82574:
4103 	case WM_T_82583:
4104 	case WM_T_82575:
4105 	case WM_T_82576:
4106 	case WM_T_82580:
4107 	case WM_T_I350:
4108 	case WM_T_I354:
4109 	case WM_T_I210:
4110 	case WM_T_I211:
4111 	case WM_T_80003:
4112 	case WM_T_ICH8:
4113 	case WM_T_ICH9:
4114 		for (i = 0; i < 10; i++) {
4115 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4116 				break;
4117 			delay(1000);
4118 		}
4119 		if (i == 10) {
4120 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4121 			    "complete\n", device_xname(sc->sc_dev));
4122 		}
4123 		break;
4124 	default:
4125 		break;
4126 	}
4127 }
4128 
4129 void
4130 wm_lan_init_done(struct wm_softc *sc)
4131 {
4132 	uint32_t reg = 0;
4133 	int i;
4134 
4135 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4136 		device_xname(sc->sc_dev), __func__));
4137 
4138 	/* Wait for eeprom to reload */
4139 	switch (sc->sc_type) {
4140 	case WM_T_ICH10:
4141 	case WM_T_PCH:
4142 	case WM_T_PCH2:
4143 	case WM_T_PCH_LPT:
4144 	case WM_T_PCH_SPT:
4145 	case WM_T_PCH_CNP:
4146 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4147 			reg = CSR_READ(sc, WMREG_STATUS);
4148 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4149 				break;
4150 			delay(100);
4151 		}
4152 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4153 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4154 			    "complete\n", device_xname(sc->sc_dev), __func__);
4155 		}
4156 		break;
4157 	default:
4158 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4159 		    __func__);
4160 		break;
4161 	}
4162 
4163 	reg &= ~STATUS_LAN_INIT_DONE;
4164 	CSR_WRITE(sc, WMREG_STATUS, reg);
4165 }
4166 
4167 void
4168 wm_get_cfg_done(struct wm_softc *sc)
4169 {
4170 	int mask;
4171 	uint32_t reg;
4172 	int i;
4173 
4174 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4175 		device_xname(sc->sc_dev), __func__));
4176 
4177 	/* Wait for eeprom to reload */
4178 	switch (sc->sc_type) {
4179 	case WM_T_82542_2_0:
4180 	case WM_T_82542_2_1:
4181 		/* null */
4182 		break;
4183 	case WM_T_82543:
4184 	case WM_T_82544:
4185 	case WM_T_82540:
4186 	case WM_T_82545:
4187 	case WM_T_82545_3:
4188 	case WM_T_82546:
4189 	case WM_T_82546_3:
4190 	case WM_T_82541:
4191 	case WM_T_82541_2:
4192 	case WM_T_82547:
4193 	case WM_T_82547_2:
4194 	case WM_T_82573:
4195 	case WM_T_82574:
4196 	case WM_T_82583:
4197 		/* generic */
4198 		delay(10*1000);
4199 		break;
4200 	case WM_T_80003:
4201 	case WM_T_82571:
4202 	case WM_T_82572:
4203 	case WM_T_82575:
4204 	case WM_T_82576:
4205 	case WM_T_82580:
4206 	case WM_T_I350:
4207 	case WM_T_I354:
4208 	case WM_T_I210:
4209 	case WM_T_I211:
4210 		if (sc->sc_type == WM_T_82571) {
4211 			/* Only 82571 shares port 0 */
4212 			mask = EEMNGCTL_CFGDONE_0;
4213 		} else
4214 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4215 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4216 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4217 				break;
4218 			delay(1000);
4219 		}
4220 		if (i >= WM_PHY_CFG_TIMEOUT)
4221 			DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s failed\n",
4222 				device_xname(sc->sc_dev), __func__));
4223 		break;
4224 	case WM_T_ICH8:
4225 	case WM_T_ICH9:
4226 	case WM_T_ICH10:
4227 	case WM_T_PCH:
4228 	case WM_T_PCH2:
4229 	case WM_T_PCH_LPT:
4230 	case WM_T_PCH_SPT:
4231 	case WM_T_PCH_CNP:
4232 		delay(10*1000);
4233 		if (sc->sc_type >= WM_T_ICH10)
4234 			wm_lan_init_done(sc);
4235 		else
4236 			wm_get_auto_rd_done(sc);
4237 
4238 		/* Clear PHY Reset Asserted bit */
4239 		reg = CSR_READ(sc, WMREG_STATUS);
4240 		if ((reg & STATUS_PHYRA) != 0)
4241 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
4242 		break;
4243 	default:
4244 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4245 		    __func__);
4246 		break;
4247 	}
4248 }
4249 
4250 int
4251 wm_phy_post_reset(struct wm_softc *sc)
4252 {
4253 	device_t dev = sc->sc_dev;
4254 	uint16_t reg;
4255 	int rv = 0;
4256 
4257 	/* This function is only for ICH8 and newer. */
4258 	if (sc->sc_type < WM_T_ICH8)
4259 		return 0;
4260 
4261 	if (wm_phy_resetisblocked(sc)) {
4262 		/* XXX */
4263 		device_printf(dev, "PHY is blocked\n");
4264 		return -1;
4265 	}
4266 
4267 	/* Allow time for h/w to get to quiescent state after reset */
4268 	delay(10*1000);
4269 
4270 	/* Perform any necessary post-reset workarounds */
4271 	if (sc->sc_type == WM_T_PCH)
4272 		rv = wm_hv_phy_workarounds_ich8lan(sc);
4273 	else if (sc->sc_type == WM_T_PCH2)
4274 		rv = wm_lv_phy_workarounds_ich8lan(sc);
4275 	if (rv != 0)
4276 		return rv;
4277 
4278 	/* Clear the host wakeup bit after lcd reset */
4279 	if (sc->sc_type >= WM_T_PCH) {
4280 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
4281 		reg &= ~BM_WUC_HOST_WU_BIT;
4282 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
4283 	}
4284 
4285 	/* Configure the LCD with the extended configuration region in NVM */
4286 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
4287 		return rv;
4288 
4289 	/* Configure the LCD with the OEM bits in NVM */
4290 	rv = wm_oem_bits_config_ich8lan(sc, true);
4291 
4292 	if (sc->sc_type == WM_T_PCH2) {
4293 		/* Ungate automatic PHY configuration on non-managed 82579 */
4294 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
4295 			delay(10 * 1000);
4296 			wm_gate_hw_phy_config_ich8lan(sc, false);
4297 		}
4298 		/* Set EEE LPI Update Timer to 200usec */
4299 		rv = sc->phy.acquire(sc);
4300 		if (rv)
4301 			return rv;
4302 		rv = wm_write_emi_reg_locked(dev,
4303 		    I82579_LPI_UPDATE_TIMER, 0x1387);
4304 		sc->phy.release(sc);
4305 	}
4306 
4307 	return rv;
4308 }
4309 
4310 /* Only for PCH and newer */
4311 static int
4312 wm_write_smbus_addr(struct wm_softc *sc)
4313 {
4314 	uint32_t strap, freq;
4315 	uint16_t phy_data;
4316 	int rv;
4317 
4318 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4319 		device_xname(sc->sc_dev), __func__));
4320 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
4321 
4322 	strap = CSR_READ(sc, WMREG_STRAP);
4323 	freq = __SHIFTOUT(strap, STRAP_FREQ);
4324 
4325 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
4326 	if (rv != 0)
4327 		return -1;
4328 
4329 	phy_data &= ~HV_SMB_ADDR_ADDR;
4330 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
4331 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
4332 
4333 	if (sc->sc_phytype == WMPHY_I217) {
4334 		/* Restore SMBus frequency */
4335 		if (freq --) {
4336 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
4337 			    | HV_SMB_ADDR_FREQ_HIGH);
4338 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
4339 			    HV_SMB_ADDR_FREQ_LOW);
4340 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
4341 			    HV_SMB_ADDR_FREQ_HIGH);
4342 		} else
4343 			DPRINTF(sc, WM_DEBUG_INIT,
4344 			    ("%s: %s Unsupported SMB frequency in PHY\n",
4345 				device_xname(sc->sc_dev), __func__));
4346 	}
4347 
4348 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
4349 	    phy_data);
4350 }
4351 
4352 static int
4353 wm_init_lcd_from_nvm(struct wm_softc *sc)
4354 {
4355 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
4356 	uint16_t phy_page = 0;
4357 	int rv = 0;
4358 
4359 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4360 		device_xname(sc->sc_dev), __func__));
4361 
4362 	switch (sc->sc_type) {
4363 	case WM_T_ICH8:
4364 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
4365 		    || (sc->sc_phytype != WMPHY_IGP_3))
4366 			return 0;
4367 
4368 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
4369 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
4370 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
4371 			break;
4372 		}
4373 		/* FALLTHROUGH */
4374 	case WM_T_PCH:
4375 	case WM_T_PCH2:
4376 	case WM_T_PCH_LPT:
4377 	case WM_T_PCH_SPT:
4378 	case WM_T_PCH_CNP:
4379 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
4380 		break;
4381 	default:
4382 		return 0;
4383 	}
4384 
4385 	if ((rv = sc->phy.acquire(sc)) != 0)
4386 		return rv;
4387 
4388 	reg = CSR_READ(sc, WMREG_FEXTNVM);
4389 	if ((reg & sw_cfg_mask) == 0)
4390 		goto release;
4391 
4392 	/*
4393 	 * Make sure HW does not configure LCD from PHY extended configuration
4394 	 * before SW configuration
4395 	 */
4396 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
4397 	if ((sc->sc_type < WM_T_PCH2)
4398 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
4399 		goto release;
4400 
4401 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
4402 		device_xname(sc->sc_dev), __func__));
4403 	/* word_addr is in DWORD */
4404 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
4405 
4406 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
4407 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
4408 	if (cnf_size == 0)
4409 		goto release;
4410 
4411 	if (((sc->sc_type == WM_T_PCH)
4412 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
4413 	    || (sc->sc_type > WM_T_PCH)) {
4414 		/*
4415 		 * HW configures the SMBus address and LEDs when the OEM and
4416 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
4417 		 * are cleared, SW will configure them instead.
4418 		 */
4419 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
4420 			device_xname(sc->sc_dev), __func__));
4421 		if ((rv = wm_write_smbus_addr(sc)) != 0)
4422 			goto release;
4423 
4424 		reg = CSR_READ(sc, WMREG_LEDCTL);
4425 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
4426 		    (uint16_t)reg);
4427 		if (rv != 0)
4428 			goto release;
4429 	}
4430 
4431 	/* Configure LCD from extended configuration region. */
4432 	for (i = 0; i < cnf_size; i++) {
4433 		uint16_t reg_data, reg_addr;
4434 
4435 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
4436 			goto release;
4437 
4438 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
4439 			goto release;
4440 
4441 		if (reg_addr == IGPHY_PAGE_SELECT)
4442 			phy_page = reg_data;
4443 
4444 		reg_addr &= IGPHY_MAXREGADDR;
4445 		reg_addr |= phy_page;
4446 
4447 		KASSERT(sc->phy.writereg_locked != NULL);
4448 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
4449 		    reg_data);
4450 	}
4451 
4452 release:
4453 	sc->phy.release(sc);
4454 	return rv;
4455 }
4456 
4457 /*
4458  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
4459  *  @sc:       pointer to the HW structure
4460  *  @d0_state: boolean if entering d0 or d3 device state
4461  *
4462  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
4463  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
4464  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
4465  */
4466 int
4467 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
4468 {
4469 	uint32_t mac_reg;
4470 	uint16_t oem_reg;
4471 	int rv;
4472 
4473 	if (sc->sc_type < WM_T_PCH)
4474 		return 0;
4475 
4476 	rv = sc->phy.acquire(sc);
4477 	if (rv != 0)
4478 		return rv;
4479 
4480 	if (sc->sc_type == WM_T_PCH) {
4481 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4482 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
4483 			goto release;
4484 	}
4485 
4486 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
4487 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
4488 		goto release;
4489 
4490 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
4491 
4492 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
4493 	if (rv != 0)
4494 		goto release;
4495 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
4496 
4497 	if (d0_state) {
4498 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
4499 			oem_reg |= HV_OEM_BITS_A1KDIS;
4500 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
4501 			oem_reg |= HV_OEM_BITS_LPLU;
4502 	} else {
4503 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
4504 		    != 0)
4505 			oem_reg |= HV_OEM_BITS_A1KDIS;
4506 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
4507 		    != 0)
4508 			oem_reg |= HV_OEM_BITS_LPLU;
4509 	}
4510 
4511 	/* Set Restart auto-neg to activate the bits */
4512 	if ((d0_state || (sc->sc_type != WM_T_PCH))
4513 	    && (wm_phy_resetisblocked(sc) == false))
4514 		oem_reg |= HV_OEM_BITS_ANEGNOW;
4515 
4516 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
4517 
4518 release:
4519 	sc->phy.release(sc);
4520 
4521 	return rv;
4522 }
4523 
4524 /* Init hardware bits */
4525 void
4526 wm_initialize_hardware_bits(struct wm_softc *sc)
4527 {
4528 	uint32_t tarc0, tarc1, reg;
4529 
4530 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4531 		device_xname(sc->sc_dev), __func__));
4532 
4533 	/* For 82571 variant, 80003 and ICHs */
4534 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4535 	    || (sc->sc_type >= WM_T_80003)) {
4536 
4537 		/* Transmit Descriptor Control 0 */
4538 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4539 		reg |= TXDCTL_COUNT_DESC;
4540 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4541 
4542 		/* Transmit Descriptor Control 1 */
4543 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4544 		reg |= TXDCTL_COUNT_DESC;
4545 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4546 
4547 		/* TARC0 */
4548 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4549 		switch (sc->sc_type) {
4550 		case WM_T_82571:
4551 		case WM_T_82572:
4552 		case WM_T_82573:
4553 		case WM_T_82574:
4554 		case WM_T_82583:
4555 		case WM_T_80003:
4556 			/* Clear bits 30..27 */
4557 			tarc0 &= ~__BITS(30, 27);
4558 			break;
4559 		default:
4560 			break;
4561 		}
4562 
4563 		switch (sc->sc_type) {
4564 		case WM_T_82571:
4565 		case WM_T_82572:
4566 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4567 
4568 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4569 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4570 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4571 			/* 8257[12] Errata No.7 */
4572 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
4573 
4574 			/* TARC1 bit 28 */
4575 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4576 				tarc1 &= ~__BIT(28);
4577 			else
4578 				tarc1 |= __BIT(28);
4579 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4580 
4581 			/*
4582 			 * 8257[12] Errata No.13
4583 			 * Disable Dyamic Clock Gating.
4584 			 */
4585 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4586 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4587 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4588 			break;
4589 		case WM_T_82573:
4590 		case WM_T_82574:
4591 		case WM_T_82583:
4592 			if ((sc->sc_type == WM_T_82574)
4593 			    || (sc->sc_type == WM_T_82583))
4594 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4595 
4596 			/* Extended Device Control */
4597 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4598 			reg &= ~__BIT(23);	/* Clear bit 23 */
4599 			reg |= __BIT(22);	/* Set bit 22 */
4600 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4601 
4602 			/* Device Control */
4603 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4604 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4605 
4606 			/* PCIe Control Register */
4607 			/*
4608 			 * 82573 Errata (unknown).
4609 			 *
4610 			 * 82574 Errata 25 and 82583 Errata 12
4611 			 * "Dropped Rx Packets":
4612 			 *   NVM Image Version 2.1.4 and newer has no this bug.
4613 			 */
4614 			reg = CSR_READ(sc, WMREG_GCR);
4615 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4616 			CSR_WRITE(sc, WMREG_GCR, reg);
4617 
4618 			if ((sc->sc_type == WM_T_82574)
4619 			    || (sc->sc_type == WM_T_82583)) {
4620 				/*
4621 				 * Document says this bit must be set for
4622 				 * proper operation.
4623 				 */
4624 				reg = CSR_READ(sc, WMREG_GCR);
4625 				reg |= __BIT(22);
4626 				CSR_WRITE(sc, WMREG_GCR, reg);
4627 
4628 				/*
4629 				 * Apply workaround for hardware errata
4630 				 * documented in errata docs Fixes issue where
4631 				 * some error prone or unreliable PCIe
4632 				 * completions are occurring, particularly
4633 				 * with ASPM enabled. Without fix, issue can
4634 				 * cause Tx timeouts.
4635 				 */
4636 				reg = CSR_READ(sc, WMREG_GCR2);
4637 				reg |= __BIT(0);
4638 				CSR_WRITE(sc, WMREG_GCR2, reg);
4639 			}
4640 			break;
4641 		case WM_T_80003:
4642 			/* TARC0 */
4643 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4644 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4645 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
4646 
4647 			/* TARC1 bit 28 */
4648 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4649 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4650 				tarc1 &= ~__BIT(28);
4651 			else
4652 				tarc1 |= __BIT(28);
4653 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4654 			break;
4655 		case WM_T_ICH8:
4656 		case WM_T_ICH9:
4657 		case WM_T_ICH10:
4658 		case WM_T_PCH:
4659 		case WM_T_PCH2:
4660 		case WM_T_PCH_LPT:
4661 		case WM_T_PCH_SPT:
4662 		case WM_T_PCH_CNP:
4663 			/* TARC0 */
4664 			if (sc->sc_type == WM_T_ICH8) {
4665 				/* Set TARC0 bits 29 and 28 */
4666 				tarc0 |= __BITS(29, 28);
4667 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4668 				tarc0 |= __BIT(29);
4669 				/*
4670 				 *  Drop bit 28. From Linux.
4671 				 * See I218/I219 spec update
4672 				 * "5. Buffer Overrun While the I219 is
4673 				 * Processing DMA Transactions"
4674 				 */
4675 				tarc0 &= ~__BIT(28);
4676 			}
4677 			/* Set TARC0 bits 23,24,26,27 */
4678 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4679 
4680 			/* CTRL_EXT */
4681 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4682 			reg |= __BIT(22);	/* Set bit 22 */
4683 			/*
4684 			 * Enable PHY low-power state when MAC is at D3
4685 			 * w/o WoL
4686 			 */
4687 			if (sc->sc_type >= WM_T_PCH)
4688 				reg |= CTRL_EXT_PHYPDEN;
4689 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4690 
4691 			/* TARC1 */
4692 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4693 			/* bit 28 */
4694 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4695 				tarc1 &= ~__BIT(28);
4696 			else
4697 				tarc1 |= __BIT(28);
4698 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4699 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4700 
4701 			/* Device Status */
4702 			if (sc->sc_type == WM_T_ICH8) {
4703 				reg = CSR_READ(sc, WMREG_STATUS);
4704 				reg &= ~__BIT(31);
4705 				CSR_WRITE(sc, WMREG_STATUS, reg);
4706 
4707 			}
4708 
4709 			/* IOSFPC */
4710 			if (sc->sc_type == WM_T_PCH_SPT) {
4711 				reg = CSR_READ(sc, WMREG_IOSFPC);
4712 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4713 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4714 			}
4715 			/*
4716 			 * Work-around descriptor data corruption issue during
4717 			 * NFS v2 UDP traffic, just disable the NFS filtering
4718 			 * capability.
4719 			 */
4720 			reg = CSR_READ(sc, WMREG_RFCTL);
4721 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4722 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4723 			break;
4724 		default:
4725 			break;
4726 		}
4727 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4728 
4729 		switch (sc->sc_type) {
4730 		/*
4731 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4732 		 * Avoid RSS Hash Value bug.
4733 		 */
4734 		case WM_T_82571:
4735 		case WM_T_82572:
4736 		case WM_T_82573:
4737 		case WM_T_80003:
4738 		case WM_T_ICH8:
4739 			reg = CSR_READ(sc, WMREG_RFCTL);
4740 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4741 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4742 			break;
4743 		case WM_T_82574:
4744 			/* Use extened Rx descriptor. */
4745 			reg = CSR_READ(sc, WMREG_RFCTL);
4746 			reg |= WMREG_RFCTL_EXSTEN;
4747 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4748 			break;
4749 		default:
4750 			break;
4751 		}
4752 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4753 		/*
4754 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4755 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4756 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4757 		 * Correctly by the Device"
4758 		 *
4759 		 * I354(C2000) Errata AVR53:
4760 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4761 		 * Hang"
4762 		 */
4763 		reg = CSR_READ(sc, WMREG_RFCTL);
4764 		reg |= WMREG_RFCTL_IPV6EXDIS;
4765 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4766 	}
4767 }
4768 
4769 static uint32_t
4770 wm_rxpbs_adjust_82580(uint32_t val)
4771 {
4772 	uint32_t rv = 0;
4773 
4774 	if (val < __arraycount(wm_82580_rxpbs_table))
4775 		rv = wm_82580_rxpbs_table[val];
4776 
4777 	return rv;
4778 }
4779 
4780 /*
4781  * wm_reset_phy:
4782  *
4783  *	generic PHY reset function.
4784  *	Same as e1000_phy_hw_reset_generic()
4785  */
4786 static int
4787 wm_reset_phy(struct wm_softc *sc)
4788 {
4789 	uint32_t reg;
4790 
4791 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4792 		device_xname(sc->sc_dev), __func__));
4793 	if (wm_phy_resetisblocked(sc))
4794 		return -1;
4795 
4796 	sc->phy.acquire(sc);
4797 
4798 	reg = CSR_READ(sc, WMREG_CTRL);
4799 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4800 	CSR_WRITE_FLUSH(sc);
4801 
4802 	delay(sc->phy.reset_delay_us);
4803 
4804 	CSR_WRITE(sc, WMREG_CTRL, reg);
4805 	CSR_WRITE_FLUSH(sc);
4806 
4807 	delay(150);
4808 
4809 	sc->phy.release(sc);
4810 
4811 	wm_get_cfg_done(sc);
4812 	wm_phy_post_reset(sc);
4813 
4814 	return 0;
4815 }
4816 
4817 /*
4818  * Only used by WM_T_PCH_SPT which does not use multiqueue,
4819  * so it is enough to check sc->sc_queue[0] only.
4820  */
4821 static void
4822 wm_flush_desc_rings(struct wm_softc *sc)
4823 {
4824 	pcireg_t preg;
4825 	uint32_t reg;
4826 	struct wm_txqueue *txq;
4827 	wiseman_txdesc_t *txd;
4828 	int nexttx;
4829 	uint32_t rctl;
4830 
4831 	/* First, disable MULR fix in FEXTNVM11 */
4832 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4833 	reg |= FEXTNVM11_DIS_MULRFIX;
4834 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4835 
4836 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4837 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4838 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4839 		return;
4840 
4841 	/* TX */
4842 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
4843 	    preg, reg);
4844 	reg = CSR_READ(sc, WMREG_TCTL);
4845 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4846 
4847 	txq = &sc->sc_queue[0].wmq_txq;
4848 	nexttx = txq->txq_next;
4849 	txd = &txq->txq_descs[nexttx];
4850 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4851 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4852 	txd->wtx_fields.wtxu_status = 0;
4853 	txd->wtx_fields.wtxu_options = 0;
4854 	txd->wtx_fields.wtxu_vlan = 0;
4855 
4856 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4857 	    BUS_SPACE_BARRIER_WRITE);
4858 
4859 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4860 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4861 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4862 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4863 	delay(250);
4864 
4865 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4866 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4867 		return;
4868 
4869 	/* RX */
4870 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
4871 	rctl = CSR_READ(sc, WMREG_RCTL);
4872 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4873 	CSR_WRITE_FLUSH(sc);
4874 	delay(150);
4875 
4876 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4877 	/* Zero the lower 14 bits (prefetch and host thresholds) */
4878 	reg &= 0xffffc000;
4879 	/*
4880 	 * Update thresholds: prefetch threshold to 31, host threshold
4881 	 * to 1 and make sure the granularity is "descriptors" and not
4882 	 * "cache lines"
4883 	 */
4884 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4885 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4886 
4887 	/* Momentarily enable the RX ring for the changes to take effect */
4888 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4889 	CSR_WRITE_FLUSH(sc);
4890 	delay(150);
4891 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4892 }
4893 
4894 /*
4895  * wm_reset:
4896  *
4897  *	Reset the i82542 chip.
4898  */
4899 static void
4900 wm_reset(struct wm_softc *sc)
4901 {
4902 	int phy_reset = 0;
4903 	int i, error = 0;
4904 	uint32_t reg;
4905 	uint16_t kmreg;
4906 	int rv;
4907 
4908 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
4909 		device_xname(sc->sc_dev), __func__));
4910 	KASSERT(sc->sc_type != 0);
4911 
4912 	/*
4913 	 * Allocate on-chip memory according to the MTU size.
4914 	 * The Packet Buffer Allocation register must be written
4915 	 * before the chip is reset.
4916 	 */
4917 	switch (sc->sc_type) {
4918 	case WM_T_82547:
4919 	case WM_T_82547_2:
4920 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4921 		    PBA_22K : PBA_30K;
4922 		for (i = 0; i < sc->sc_nqueues; i++) {
4923 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4924 			txq->txq_fifo_head = 0;
4925 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4926 			txq->txq_fifo_size =
4927 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4928 			txq->txq_fifo_stall = 0;
4929 		}
4930 		break;
4931 	case WM_T_82571:
4932 	case WM_T_82572:
4933 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
4934 	case WM_T_80003:
4935 		sc->sc_pba = PBA_32K;
4936 		break;
4937 	case WM_T_82573:
4938 		sc->sc_pba = PBA_12K;
4939 		break;
4940 	case WM_T_82574:
4941 	case WM_T_82583:
4942 		sc->sc_pba = PBA_20K;
4943 		break;
4944 	case WM_T_82576:
4945 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4946 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4947 		break;
4948 	case WM_T_82580:
4949 	case WM_T_I350:
4950 	case WM_T_I354:
4951 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4952 		break;
4953 	case WM_T_I210:
4954 	case WM_T_I211:
4955 		sc->sc_pba = PBA_34K;
4956 		break;
4957 	case WM_T_ICH8:
4958 		/* Workaround for a bit corruption issue in FIFO memory */
4959 		sc->sc_pba = PBA_8K;
4960 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4961 		break;
4962 	case WM_T_ICH9:
4963 	case WM_T_ICH10:
4964 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4965 		    PBA_14K : PBA_10K;
4966 		break;
4967 	case WM_T_PCH:
4968 	case WM_T_PCH2:	/* XXX 14K? */
4969 	case WM_T_PCH_LPT:
4970 	case WM_T_PCH_SPT:
4971 	case WM_T_PCH_CNP:
4972 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 1500 ?
4973 		    PBA_12K : PBA_26K;
4974 		break;
4975 	default:
4976 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4977 		    PBA_40K : PBA_48K;
4978 		break;
4979 	}
4980 	/*
4981 	 * Only old or non-multiqueue devices have the PBA register
4982 	 * XXX Need special handling for 82575.
4983 	 */
4984 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4985 	    || (sc->sc_type == WM_T_82575))
4986 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4987 
4988 	/* Prevent the PCI-E bus from sticking */
4989 	if (sc->sc_flags & WM_F_PCIE) {
4990 		int timeout = 800;
4991 
4992 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4993 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4994 
4995 		while (timeout--) {
4996 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4997 			    == 0)
4998 				break;
4999 			delay(100);
5000 		}
5001 		if (timeout == 0)
5002 			device_printf(sc->sc_dev,
5003 			    "failed to disable busmastering\n");
5004 	}
5005 
5006 	/* Set the completion timeout for interface */
5007 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
5008 	    || (sc->sc_type == WM_T_82580)
5009 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5010 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
5011 		wm_set_pcie_completion_timeout(sc);
5012 
5013 	/* Clear interrupt */
5014 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5015 	if (wm_is_using_msix(sc)) {
5016 		if (sc->sc_type != WM_T_82574) {
5017 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5018 			CSR_WRITE(sc, WMREG_EIAC, 0);
5019 		} else
5020 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5021 	}
5022 
5023 	/* Stop the transmit and receive processes. */
5024 	CSR_WRITE(sc, WMREG_RCTL, 0);
5025 	sc->sc_rctl &= ~RCTL_EN;
5026 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
5027 	CSR_WRITE_FLUSH(sc);
5028 
5029 	/* XXX set_tbi_sbp_82543() */
5030 
5031 	delay(10*1000);
5032 
5033 	/* Must acquire the MDIO ownership before MAC reset */
5034 	switch (sc->sc_type) {
5035 	case WM_T_82573:
5036 	case WM_T_82574:
5037 	case WM_T_82583:
5038 		error = wm_get_hw_semaphore_82573(sc);
5039 		break;
5040 	default:
5041 		break;
5042 	}
5043 
5044 	/*
5045 	 * 82541 Errata 29? & 82547 Errata 28?
5046 	 * See also the description about PHY_RST bit in CTRL register
5047 	 * in 8254x_GBe_SDM.pdf.
5048 	 */
5049 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
5050 		CSR_WRITE(sc, WMREG_CTRL,
5051 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
5052 		CSR_WRITE_FLUSH(sc);
5053 		delay(5000);
5054 	}
5055 
5056 	switch (sc->sc_type) {
5057 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
5058 	case WM_T_82541:
5059 	case WM_T_82541_2:
5060 	case WM_T_82547:
5061 	case WM_T_82547_2:
5062 		/*
5063 		 * On some chipsets, a reset through a memory-mapped write
5064 		 * cycle can cause the chip to reset before completing the
5065 		 * write cycle. This causes major headache that can be avoided
5066 		 * by issuing the reset via indirect register writes through
5067 		 * I/O space.
5068 		 *
5069 		 * So, if we successfully mapped the I/O BAR at attach time,
5070 		 * use that. Otherwise, try our luck with a memory-mapped
5071 		 * reset.
5072 		 */
5073 		if (sc->sc_flags & WM_F_IOH_VALID)
5074 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
5075 		else
5076 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
5077 		break;
5078 	case WM_T_82545_3:
5079 	case WM_T_82546_3:
5080 		/* Use the shadow control register on these chips. */
5081 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
5082 		break;
5083 	case WM_T_80003:
5084 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5085 		sc->phy.acquire(sc);
5086 		CSR_WRITE(sc, WMREG_CTRL, reg);
5087 		sc->phy.release(sc);
5088 		break;
5089 	case WM_T_ICH8:
5090 	case WM_T_ICH9:
5091 	case WM_T_ICH10:
5092 	case WM_T_PCH:
5093 	case WM_T_PCH2:
5094 	case WM_T_PCH_LPT:
5095 	case WM_T_PCH_SPT:
5096 	case WM_T_PCH_CNP:
5097 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
5098 		if (wm_phy_resetisblocked(sc) == false) {
5099 			/*
5100 			 * Gate automatic PHY configuration by hardware on
5101 			 * non-managed 82579
5102 			 */
5103 			if ((sc->sc_type == WM_T_PCH2)
5104 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
5105 				== 0))
5106 				wm_gate_hw_phy_config_ich8lan(sc, true);
5107 
5108 			reg |= CTRL_PHY_RESET;
5109 			phy_reset = 1;
5110 		} else
5111 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
5112 		sc->phy.acquire(sc);
5113 		CSR_WRITE(sc, WMREG_CTRL, reg);
5114 		/* Don't insert a completion barrier when reset */
5115 		delay(20*1000);
5116 		mutex_exit(sc->sc_ich_phymtx);
5117 		break;
5118 	case WM_T_82580:
5119 	case WM_T_I350:
5120 	case WM_T_I354:
5121 	case WM_T_I210:
5122 	case WM_T_I211:
5123 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5124 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
5125 			CSR_WRITE_FLUSH(sc);
5126 		delay(5000);
5127 		break;
5128 	case WM_T_82542_2_0:
5129 	case WM_T_82542_2_1:
5130 	case WM_T_82543:
5131 	case WM_T_82540:
5132 	case WM_T_82545:
5133 	case WM_T_82546:
5134 	case WM_T_82571:
5135 	case WM_T_82572:
5136 	case WM_T_82573:
5137 	case WM_T_82574:
5138 	case WM_T_82575:
5139 	case WM_T_82576:
5140 	case WM_T_82583:
5141 	default:
5142 		/* Everything else can safely use the documented method. */
5143 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
5144 		break;
5145 	}
5146 
5147 	/* Must release the MDIO ownership after MAC reset */
5148 	switch (sc->sc_type) {
5149 	case WM_T_82573:
5150 	case WM_T_82574:
5151 	case WM_T_82583:
5152 		if (error == 0)
5153 			wm_put_hw_semaphore_82573(sc);
5154 		break;
5155 	default:
5156 		break;
5157 	}
5158 
5159 	/* Set Phy Config Counter to 50msec */
5160 	if (sc->sc_type == WM_T_PCH2) {
5161 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
5162 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
5163 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
5164 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
5165 	}
5166 
5167 	if (phy_reset != 0)
5168 		wm_get_cfg_done(sc);
5169 
5170 	/* Reload EEPROM */
5171 	switch (sc->sc_type) {
5172 	case WM_T_82542_2_0:
5173 	case WM_T_82542_2_1:
5174 	case WM_T_82543:
5175 	case WM_T_82544:
5176 		delay(10);
5177 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5178 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5179 		CSR_WRITE_FLUSH(sc);
5180 		delay(2000);
5181 		break;
5182 	case WM_T_82540:
5183 	case WM_T_82545:
5184 	case WM_T_82545_3:
5185 	case WM_T_82546:
5186 	case WM_T_82546_3:
5187 		delay(5*1000);
5188 		/* XXX Disable HW ARPs on ASF enabled adapters */
5189 		break;
5190 	case WM_T_82541:
5191 	case WM_T_82541_2:
5192 	case WM_T_82547:
5193 	case WM_T_82547_2:
5194 		delay(20000);
5195 		/* XXX Disable HW ARPs on ASF enabled adapters */
5196 		break;
5197 	case WM_T_82571:
5198 	case WM_T_82572:
5199 	case WM_T_82573:
5200 	case WM_T_82574:
5201 	case WM_T_82583:
5202 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
5203 			delay(10);
5204 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
5205 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5206 			CSR_WRITE_FLUSH(sc);
5207 		}
5208 		/* check EECD_EE_AUTORD */
5209 		wm_get_auto_rd_done(sc);
5210 		/*
5211 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
5212 		 * is set.
5213 		 */
5214 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
5215 		    || (sc->sc_type == WM_T_82583))
5216 			delay(25*1000);
5217 		break;
5218 	case WM_T_82575:
5219 	case WM_T_82576:
5220 	case WM_T_82580:
5221 	case WM_T_I350:
5222 	case WM_T_I354:
5223 	case WM_T_I210:
5224 	case WM_T_I211:
5225 	case WM_T_80003:
5226 		/* check EECD_EE_AUTORD */
5227 		wm_get_auto_rd_done(sc);
5228 		break;
5229 	case WM_T_ICH8:
5230 	case WM_T_ICH9:
5231 	case WM_T_ICH10:
5232 	case WM_T_PCH:
5233 	case WM_T_PCH2:
5234 	case WM_T_PCH_LPT:
5235 	case WM_T_PCH_SPT:
5236 	case WM_T_PCH_CNP:
5237 		break;
5238 	default:
5239 		panic("%s: unknown type\n", __func__);
5240 	}
5241 
5242 	/* Check whether EEPROM is present or not */
5243 	switch (sc->sc_type) {
5244 	case WM_T_82575:
5245 	case WM_T_82576:
5246 	case WM_T_82580:
5247 	case WM_T_I350:
5248 	case WM_T_I354:
5249 	case WM_T_ICH8:
5250 	case WM_T_ICH9:
5251 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
5252 			/* Not found */
5253 			sc->sc_flags |= WM_F_EEPROM_INVALID;
5254 			if (sc->sc_type == WM_T_82575)
5255 				wm_reset_init_script_82575(sc);
5256 		}
5257 		break;
5258 	default:
5259 		break;
5260 	}
5261 
5262 	if (phy_reset != 0)
5263 		wm_phy_post_reset(sc);
5264 
5265 	if ((sc->sc_type == WM_T_82580)
5266 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
5267 		/* Clear global device reset status bit */
5268 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
5269 	}
5270 
5271 	/* Clear any pending interrupt events. */
5272 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5273 	reg = CSR_READ(sc, WMREG_ICR);
5274 	if (wm_is_using_msix(sc)) {
5275 		if (sc->sc_type != WM_T_82574) {
5276 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5277 			CSR_WRITE(sc, WMREG_EIAC, 0);
5278 		} else
5279 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5280 	}
5281 
5282 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5283 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5284 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5285 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
5286 		reg = CSR_READ(sc, WMREG_KABGTXD);
5287 		reg |= KABGTXD_BGSQLBIAS;
5288 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5289 	}
5290 
5291 	/* Reload sc_ctrl */
5292 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5293 
5294 	wm_set_eee(sc);
5295 
5296 	/*
5297 	 * For PCH, this write will make sure that any noise will be detected
5298 	 * as a CRC error and be dropped rather than show up as a bad packet
5299 	 * to the DMA engine
5300 	 */
5301 	if (sc->sc_type == WM_T_PCH)
5302 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
5303 
5304 	if (sc->sc_type >= WM_T_82544)
5305 		CSR_WRITE(sc, WMREG_WUC, 0);
5306 
5307 	if (sc->sc_type < WM_T_82575)
5308 		wm_disable_aspm(sc); /* Workaround for some chips */
5309 
5310 	wm_reset_mdicnfg_82580(sc);
5311 
5312 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
5313 		wm_pll_workaround_i210(sc);
5314 
5315 	if (sc->sc_type == WM_T_80003) {
5316 		/* Default to TRUE to enable the MDIC W/A */
5317 		sc->sc_flags |= WM_F_80003_MDIC_WA;
5318 
5319 		rv = wm_kmrn_readreg(sc,
5320 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
5321 		if (rv == 0) {
5322 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
5323 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
5324 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
5325 			else
5326 				sc->sc_flags |= WM_F_80003_MDIC_WA;
5327 		}
5328 	}
5329 }
5330 
5331 /*
5332  * wm_add_rxbuf:
5333  *
5334  *	Add a receive buffer to the indiciated descriptor.
5335  */
5336 static int
5337 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
5338 {
5339 	struct wm_softc *sc = rxq->rxq_sc;
5340 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
5341 	struct mbuf *m;
5342 	int error;
5343 
5344 	KASSERT(mutex_owned(rxq->rxq_lock));
5345 
5346 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5347 	if (m == NULL)
5348 		return ENOBUFS;
5349 
5350 	MCLGET(m, M_DONTWAIT);
5351 	if ((m->m_flags & M_EXT) == 0) {
5352 		m_freem(m);
5353 		return ENOBUFS;
5354 	}
5355 
5356 	if (rxs->rxs_mbuf != NULL)
5357 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5358 
5359 	rxs->rxs_mbuf = m;
5360 
5361 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5362 	/*
5363 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
5364 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
5365 	 */
5366 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
5367 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
5368 	if (error) {
5369 		/* XXX XXX XXX */
5370 		aprint_error_dev(sc->sc_dev,
5371 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
5372 		panic("wm_add_rxbuf");
5373 	}
5374 
5375 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5376 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5377 
5378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5379 		if ((sc->sc_rctl & RCTL_EN) != 0)
5380 			wm_init_rxdesc(rxq, idx);
5381 	} else
5382 		wm_init_rxdesc(rxq, idx);
5383 
5384 	return 0;
5385 }
5386 
5387 /*
5388  * wm_rxdrain:
5389  *
5390  *	Drain the receive queue.
5391  */
5392 static void
5393 wm_rxdrain(struct wm_rxqueue *rxq)
5394 {
5395 	struct wm_softc *sc = rxq->rxq_sc;
5396 	struct wm_rxsoft *rxs;
5397 	int i;
5398 
5399 	KASSERT(mutex_owned(rxq->rxq_lock));
5400 
5401 	for (i = 0; i < WM_NRXDESC; i++) {
5402 		rxs = &rxq->rxq_soft[i];
5403 		if (rxs->rxs_mbuf != NULL) {
5404 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5405 			m_freem(rxs->rxs_mbuf);
5406 			rxs->rxs_mbuf = NULL;
5407 		}
5408 	}
5409 }
5410 
5411 /*
5412  * Setup registers for RSS.
5413  *
5414  * XXX not yet VMDq support
5415  */
5416 static void
5417 wm_init_rss(struct wm_softc *sc)
5418 {
5419 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
5420 	int i;
5421 
5422 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
5423 
5424 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
5425 		unsigned int qid, reta_ent;
5426 
5427 		qid  = i % sc->sc_nqueues;
5428 		switch (sc->sc_type) {
5429 		case WM_T_82574:
5430 			reta_ent = __SHIFTIN(qid,
5431 			    RETA_ENT_QINDEX_MASK_82574);
5432 			break;
5433 		case WM_T_82575:
5434 			reta_ent = __SHIFTIN(qid,
5435 			    RETA_ENT_QINDEX1_MASK_82575);
5436 			break;
5437 		default:
5438 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
5439 			break;
5440 		}
5441 
5442 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
5443 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
5444 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
5445 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
5446 	}
5447 
5448 	rss_getkey((uint8_t *)rss_key);
5449 	for (i = 0; i < RSSRK_NUM_REGS; i++)
5450 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
5451 
5452 	if (sc->sc_type == WM_T_82574)
5453 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
5454 	else
5455 		mrqc = MRQC_ENABLE_RSS_MQ;
5456 
5457 	/*
5458 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
5459 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
5460 	 */
5461 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
5462 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
5463 #if 0
5464 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
5465 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
5466 #endif
5467 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
5468 
5469 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
5470 }
5471 
5472 /*
5473  * Adjust TX and RX queue numbers which the system actulally uses.
5474  *
5475  * The numbers are affected by below parameters.
5476  *     - The nubmer of hardware queues
5477  *     - The number of MSI-X vectors (= "nvectors" argument)
5478  *     - ncpu
5479  */
5480 static void
5481 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
5482 {
5483 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
5484 
5485 	if (nvectors < 2) {
5486 		sc->sc_nqueues = 1;
5487 		return;
5488 	}
5489 
5490 	switch (sc->sc_type) {
5491 	case WM_T_82572:
5492 		hw_ntxqueues = 2;
5493 		hw_nrxqueues = 2;
5494 		break;
5495 	case WM_T_82574:
5496 		hw_ntxqueues = 2;
5497 		hw_nrxqueues = 2;
5498 		break;
5499 	case WM_T_82575:
5500 		hw_ntxqueues = 4;
5501 		hw_nrxqueues = 4;
5502 		break;
5503 	case WM_T_82576:
5504 		hw_ntxqueues = 16;
5505 		hw_nrxqueues = 16;
5506 		break;
5507 	case WM_T_82580:
5508 	case WM_T_I350:
5509 	case WM_T_I354:
5510 		hw_ntxqueues = 8;
5511 		hw_nrxqueues = 8;
5512 		break;
5513 	case WM_T_I210:
5514 		hw_ntxqueues = 4;
5515 		hw_nrxqueues = 4;
5516 		break;
5517 	case WM_T_I211:
5518 		hw_ntxqueues = 2;
5519 		hw_nrxqueues = 2;
5520 		break;
5521 		/*
5522 		 * As below ethernet controllers does not support MSI-X,
5523 		 * this driver let them not use multiqueue.
5524 		 *     - WM_T_80003
5525 		 *     - WM_T_ICH8
5526 		 *     - WM_T_ICH9
5527 		 *     - WM_T_ICH10
5528 		 *     - WM_T_PCH
5529 		 *     - WM_T_PCH2
5530 		 *     - WM_T_PCH_LPT
5531 		 */
5532 	default:
5533 		hw_ntxqueues = 1;
5534 		hw_nrxqueues = 1;
5535 		break;
5536 	}
5537 
5538 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
5539 
5540 	/*
5541 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
5542 	 * the number of queues used actually.
5543 	 */
5544 	if (nvectors < hw_nqueues + 1)
5545 		sc->sc_nqueues = nvectors - 1;
5546 	else
5547 		sc->sc_nqueues = hw_nqueues;
5548 
5549 	/*
5550 	 * As queues more then cpus cannot improve scaling, we limit
5551 	 * the number of queues used actually.
5552 	 */
5553 	if (ncpu < sc->sc_nqueues)
5554 		sc->sc_nqueues = ncpu;
5555 }
5556 
5557 static inline bool
5558 wm_is_using_msix(struct wm_softc *sc)
5559 {
5560 
5561 	return (sc->sc_nintrs > 1);
5562 }
5563 
5564 static inline bool
5565 wm_is_using_multiqueue(struct wm_softc *sc)
5566 {
5567 
5568 	return (sc->sc_nqueues > 1);
5569 }
5570 
5571 static int
5572 wm_softint_establish_queue(struct wm_softc *sc, int qidx, int intr_idx)
5573 {
5574 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5575 
5576 	wmq->wmq_id = qidx;
5577 	wmq->wmq_intr_idx = intr_idx;
5578 	wmq->wmq_si = softint_establish(SOFTINT_NET | WM_SOFTINT_FLAGS,
5579 	    wm_handle_queue, wmq);
5580 	if (wmq->wmq_si != NULL)
5581 		return 0;
5582 
5583 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5584 	    wmq->wmq_id);
5585 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5586 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5587 	return ENOMEM;
5588 }
5589 
5590 /*
5591  * Both single interrupt MSI and INTx can use this function.
5592  */
5593 static int
5594 wm_setup_legacy(struct wm_softc *sc)
5595 {
5596 	pci_chipset_tag_t pc = sc->sc_pc;
5597 	const char *intrstr = NULL;
5598 	char intrbuf[PCI_INTRSTR_LEN];
5599 	int error;
5600 
5601 	error = wm_alloc_txrx_queues(sc);
5602 	if (error) {
5603 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5604 		    error);
5605 		return ENOMEM;
5606 	}
5607 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5608 	    sizeof(intrbuf));
5609 #ifdef WM_MPSAFE
5610 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5611 #endif
5612 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5613 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5614 	if (sc->sc_ihs[0] == NULL) {
5615 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
5616 		    (pci_intr_type(pc, sc->sc_intrs[0])
5617 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5618 		return ENOMEM;
5619 	}
5620 
5621 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5622 	sc->sc_nintrs = 1;
5623 
5624 	return wm_softint_establish_queue(sc, 0, 0);
5625 }
5626 
5627 static int
5628 wm_setup_msix(struct wm_softc *sc)
5629 {
5630 	void *vih;
5631 	kcpuset_t *affinity;
5632 	int qidx, error, intr_idx, txrx_established;
5633 	pci_chipset_tag_t pc = sc->sc_pc;
5634 	const char *intrstr = NULL;
5635 	char intrbuf[PCI_INTRSTR_LEN];
5636 	char intr_xname[INTRDEVNAMEBUF];
5637 
5638 	if (sc->sc_nqueues < ncpu) {
5639 		/*
5640 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5641 		 * interrupts start from CPU#1.
5642 		 */
5643 		sc->sc_affinity_offset = 1;
5644 	} else {
5645 		/*
5646 		 * In this case, this device use all CPUs. So, we unify
5647 		 * affinitied cpu_index to msix vector number for readability.
5648 		 */
5649 		sc->sc_affinity_offset = 0;
5650 	}
5651 
5652 	error = wm_alloc_txrx_queues(sc);
5653 	if (error) {
5654 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5655 		    error);
5656 		return ENOMEM;
5657 	}
5658 
5659 	kcpuset_create(&affinity, false);
5660 	intr_idx = 0;
5661 
5662 	/*
5663 	 * TX and RX
5664 	 */
5665 	txrx_established = 0;
5666 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5667 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5668 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5669 
5670 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5671 		    sizeof(intrbuf));
5672 #ifdef WM_MPSAFE
5673 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5674 		    PCI_INTR_MPSAFE, true);
5675 #endif
5676 		memset(intr_xname, 0, sizeof(intr_xname));
5677 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5678 		    device_xname(sc->sc_dev), qidx);
5679 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5680 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5681 		if (vih == NULL) {
5682 			aprint_error_dev(sc->sc_dev,
5683 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5684 			    intrstr ? " at " : "",
5685 			    intrstr ? intrstr : "");
5686 
5687 			goto fail;
5688 		}
5689 		kcpuset_zero(affinity);
5690 		/* Round-robin affinity */
5691 		kcpuset_set(affinity, affinity_to);
5692 		error = interrupt_distribute(vih, affinity, NULL);
5693 		if (error == 0) {
5694 			aprint_normal_dev(sc->sc_dev,
5695 			    "for TX and RX interrupting at %s affinity to %u\n",
5696 			    intrstr, affinity_to);
5697 		} else {
5698 			aprint_normal_dev(sc->sc_dev,
5699 			    "for TX and RX interrupting at %s\n", intrstr);
5700 		}
5701 		sc->sc_ihs[intr_idx] = vih;
5702 		if (wm_softint_establish_queue(sc, qidx, intr_idx) != 0)
5703 			goto fail;
5704 		txrx_established++;
5705 		intr_idx++;
5706 	}
5707 
5708 	/* LINK */
5709 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5710 	    sizeof(intrbuf));
5711 #ifdef WM_MPSAFE
5712 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5713 #endif
5714 	memset(intr_xname, 0, sizeof(intr_xname));
5715 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5716 	    device_xname(sc->sc_dev));
5717 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5718 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5719 	if (vih == NULL) {
5720 		aprint_error_dev(sc->sc_dev,
5721 		    "unable to establish MSI-X(for LINK)%s%s\n",
5722 		    intrstr ? " at " : "",
5723 		    intrstr ? intrstr : "");
5724 
5725 		goto fail;
5726 	}
5727 	/* Keep default affinity to LINK interrupt */
5728 	aprint_normal_dev(sc->sc_dev,
5729 	    "for LINK interrupting at %s\n", intrstr);
5730 	sc->sc_ihs[intr_idx] = vih;
5731 	sc->sc_link_intr_idx = intr_idx;
5732 
5733 	sc->sc_nintrs = sc->sc_nqueues + 1;
5734 	kcpuset_destroy(affinity);
5735 	return 0;
5736 
5737  fail:
5738 	for (qidx = 0; qidx < txrx_established; qidx++) {
5739 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5740 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
5741 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5742 	}
5743 
5744 	kcpuset_destroy(affinity);
5745 	return ENOMEM;
5746 }
5747 
5748 static void
5749 wm_unset_stopping_flags(struct wm_softc *sc)
5750 {
5751 	int i;
5752 
5753 	KASSERT(WM_CORE_LOCKED(sc));
5754 
5755 	/* Must unset stopping flags in ascending order. */
5756 	for (i = 0; i < sc->sc_nqueues; i++) {
5757 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5758 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5759 
5760 		mutex_enter(txq->txq_lock);
5761 		txq->txq_stopping = false;
5762 		mutex_exit(txq->txq_lock);
5763 
5764 		mutex_enter(rxq->rxq_lock);
5765 		rxq->rxq_stopping = false;
5766 		mutex_exit(rxq->rxq_lock);
5767 	}
5768 
5769 	sc->sc_core_stopping = false;
5770 }
5771 
5772 static void
5773 wm_set_stopping_flags(struct wm_softc *sc)
5774 {
5775 	int i;
5776 
5777 	KASSERT(WM_CORE_LOCKED(sc));
5778 
5779 	sc->sc_core_stopping = true;
5780 
5781 	/* Must set stopping flags in ascending order. */
5782 	for (i = 0; i < sc->sc_nqueues; i++) {
5783 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5784 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5785 
5786 		mutex_enter(rxq->rxq_lock);
5787 		rxq->rxq_stopping = true;
5788 		mutex_exit(rxq->rxq_lock);
5789 
5790 		mutex_enter(txq->txq_lock);
5791 		txq->txq_stopping = true;
5792 		mutex_exit(txq->txq_lock);
5793 	}
5794 }
5795 
5796 /*
5797  * Write interrupt interval value to ITR or EITR
5798  */
5799 static void
5800 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5801 {
5802 
5803 	if (!wmq->wmq_set_itr)
5804 		return;
5805 
5806 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5807 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5808 
5809 		/*
5810 		 * 82575 doesn't have CNT_INGR field.
5811 		 * So, overwrite counter field by software.
5812 		 */
5813 		if (sc->sc_type == WM_T_82575)
5814 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5815 		else
5816 			eitr |= EITR_CNT_INGR;
5817 
5818 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5819 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5820 		/*
5821 		 * 82574 has both ITR and EITR. SET EITR when we use
5822 		 * the multi queue function with MSI-X.
5823 		 */
5824 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5825 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5826 	} else {
5827 		KASSERT(wmq->wmq_id == 0);
5828 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5829 	}
5830 
5831 	wmq->wmq_set_itr = false;
5832 }
5833 
5834 /*
5835  * TODO
5836  * Below dynamic calculation of itr is almost the same as linux igb,
5837  * however it does not fit to wm(4). So, we will have been disable AIM
5838  * until we will find appropriate calculation of itr.
5839  */
5840 /*
5841  * calculate interrupt interval value to be going to write register in
5842  * wm_itrs_writereg(). This function does not write ITR/EITR register.
5843  */
5844 static void
5845 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5846 {
5847 #ifdef NOTYET
5848 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5849 	struct wm_txqueue *txq = &wmq->wmq_txq;
5850 	uint32_t avg_size = 0;
5851 	uint32_t new_itr;
5852 
5853 	if (rxq->rxq_packets)
5854 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5855 	if (txq->txq_packets)
5856 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
5857 
5858 	if (avg_size == 0) {
5859 		new_itr = 450; /* restore default value */
5860 		goto out;
5861 	}
5862 
5863 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5864 	avg_size += 24;
5865 
5866 	/* Don't starve jumbo frames */
5867 	avg_size = uimin(avg_size, 3000);
5868 
5869 	/* Give a little boost to mid-size frames */
5870 	if ((avg_size > 300) && (avg_size < 1200))
5871 		new_itr = avg_size / 3;
5872 	else
5873 		new_itr = avg_size / 2;
5874 
5875 out:
5876 	/*
5877 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
5878 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5879 	 */
5880 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5881 		new_itr *= 4;
5882 
5883 	if (new_itr != wmq->wmq_itr) {
5884 		wmq->wmq_itr = new_itr;
5885 		wmq->wmq_set_itr = true;
5886 	} else
5887 		wmq->wmq_set_itr = false;
5888 
5889 	rxq->rxq_packets = 0;
5890 	rxq->rxq_bytes = 0;
5891 	txq->txq_packets = 0;
5892 	txq->txq_bytes = 0;
5893 #endif
5894 }
5895 
5896 static void
5897 wm_init_sysctls(struct wm_softc *sc)
5898 {
5899 	struct sysctllog **log;
5900 	const struct sysctlnode *rnode, *qnode, *cnode;
5901 	int i, rv;
5902 	const char *dvname;
5903 
5904 	log = &sc->sc_sysctllog;
5905 	dvname = device_xname(sc->sc_dev);
5906 
5907 	rv = sysctl_createv(log, 0, NULL, &rnode,
5908 	    0, CTLTYPE_NODE, dvname,
5909 	    SYSCTL_DESCR("wm information and settings"),
5910 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
5911 	if (rv != 0)
5912 		goto err;
5913 
5914 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5915 	    CTLTYPE_BOOL, "txrx_workqueue", SYSCTL_DESCR("Use workqueue for packet processing"),
5916 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
5917 	if (rv != 0)
5918 		goto teardown;
5919 
5920 	for (i = 0; i < sc->sc_nqueues; i++) {
5921 		struct wm_queue *wmq = &sc->sc_queue[i];
5922 		struct wm_txqueue *txq = &wmq->wmq_txq;
5923 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5924 
5925 		snprintf(sc->sc_queue[i].sysctlname,
5926 		    sizeof(sc->sc_queue[i].sysctlname), "q%d", i);
5927 
5928 		if (sysctl_createv(log, 0, &rnode, &qnode,
5929 		    0, CTLTYPE_NODE,
5930 		    sc->sc_queue[i].sysctlname, SYSCTL_DESCR("Queue Name"),
5931 		    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL) != 0)
5932 			break;
5933 		if (sysctl_createv(log, 0, &qnode, &cnode,
5934 		    CTLFLAG_READONLY, CTLTYPE_INT,
5935 		    "txq_free", SYSCTL_DESCR("TX queue free"),
5936 		    NULL, 0, &txq->txq_free,
5937 		    0, CTL_CREATE, CTL_EOL) != 0)
5938 			break;
5939 		if (sysctl_createv(log, 0, &qnode, &cnode,
5940 		    CTLFLAG_READONLY, CTLTYPE_INT,
5941 		    "txq_next", SYSCTL_DESCR("TX queue next"),
5942 		    NULL, 0, &txq->txq_next,
5943 		    0, CTL_CREATE, CTL_EOL) != 0)
5944 			break;
5945 
5946 		if (sysctl_createv(log, 0, &qnode, &cnode,
5947 		    CTLFLAG_READONLY, CTLTYPE_INT,
5948 		    "rxq_ptr", SYSCTL_DESCR("RX queue pointer"),
5949 		    NULL, 0, &rxq->rxq_ptr,
5950 		    0, CTL_CREATE, CTL_EOL) != 0)
5951 			break;
5952 	}
5953 
5954 #ifdef WM_DEBUG
5955 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
5956 	    CTLTYPE_INT, "debug_flags",
5957 	    SYSCTL_DESCR(
5958 		    "Debug flags:\n"	\
5959 		    "\t0x01 LINK\n"	\
5960 		    "\t0x02 TX\n"	\
5961 		    "\t0x04 RX\n"	\
5962 		    "\t0x08 GMII\n"	\
5963 		    "\t0x10 MANAGE\n"	\
5964 		    "\t0x20 NVM\n"	\
5965 		    "\t0x40 INIT\n"	\
5966 		    "\t0x80 LOCK"),
5967 	    wm_sysctl_debug, 0, (void *)sc, 0, CTL_CREATE, CTL_EOL);
5968 	if (rv != 0)
5969 		goto teardown;
5970 #endif
5971 
5972 	return;
5973 
5974 teardown:
5975 	sysctl_teardown(log);
5976 err:
5977 	sc->sc_sysctllog = NULL;
5978 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
5979 	    __func__, rv);
5980 }
5981 
5982 /*
5983  * wm_init:		[ifnet interface function]
5984  *
5985  *	Initialize the interface.
5986  */
5987 static int
5988 wm_init(struct ifnet *ifp)
5989 {
5990 	struct wm_softc *sc = ifp->if_softc;
5991 	int ret;
5992 
5993 	WM_CORE_LOCK(sc);
5994 	ret = wm_init_locked(ifp);
5995 	WM_CORE_UNLOCK(sc);
5996 
5997 	return ret;
5998 }
5999 
6000 static int
6001 wm_init_locked(struct ifnet *ifp)
6002 {
6003 	struct wm_softc *sc = ifp->if_softc;
6004 	struct ethercom *ec = &sc->sc_ethercom;
6005 	int i, j, trynum, error = 0;
6006 	uint32_t reg, sfp_mask = 0;
6007 
6008 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6009 		device_xname(sc->sc_dev), __func__));
6010 	KASSERT(WM_CORE_LOCKED(sc));
6011 
6012 	/*
6013 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
6014 	 * There is a small but measurable benefit to avoiding the adjusment
6015 	 * of the descriptor so that the headers are aligned, for normal mtu,
6016 	 * on such platforms.  One possibility is that the DMA itself is
6017 	 * slightly more efficient if the front of the entire packet (instead
6018 	 * of the front of the headers) is aligned.
6019 	 *
6020 	 * Note we must always set align_tweak to 0 if we are using
6021 	 * jumbo frames.
6022 	 */
6023 #ifdef __NO_STRICT_ALIGNMENT
6024 	sc->sc_align_tweak = 0;
6025 #else
6026 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
6027 		sc->sc_align_tweak = 0;
6028 	else
6029 		sc->sc_align_tweak = 2;
6030 #endif /* __NO_STRICT_ALIGNMENT */
6031 
6032 	/* Cancel any pending I/O. */
6033 	wm_stop_locked(ifp, false, false);
6034 
6035 	/* Update statistics before reset */
6036 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
6037 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
6038 
6039 	/* PCH_SPT hardware workaround */
6040 	if (sc->sc_type == WM_T_PCH_SPT)
6041 		wm_flush_desc_rings(sc);
6042 
6043 	/* Reset the chip to a known state. */
6044 	wm_reset(sc);
6045 
6046 	/*
6047 	 * AMT based hardware can now take control from firmware
6048 	 * Do this after reset.
6049 	 */
6050 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
6051 		wm_get_hw_control(sc);
6052 
6053 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
6054 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
6055 		wm_legacy_irq_quirk_spt(sc);
6056 
6057 	/* Init hardware bits */
6058 	wm_initialize_hardware_bits(sc);
6059 
6060 	/* Reset the PHY. */
6061 	if (sc->sc_flags & WM_F_HAS_MII)
6062 		wm_gmii_reset(sc);
6063 
6064 	if (sc->sc_type >= WM_T_ICH8) {
6065 		reg = CSR_READ(sc, WMREG_GCR);
6066 		/*
6067 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
6068 		 * default after reset.
6069 		 */
6070 		if (sc->sc_type == WM_T_ICH8)
6071 			reg |= GCR_NO_SNOOP_ALL;
6072 		else
6073 			reg &= ~GCR_NO_SNOOP_ALL;
6074 		CSR_WRITE(sc, WMREG_GCR, reg);
6075 	}
6076 
6077 	if ((sc->sc_type >= WM_T_ICH8)
6078 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
6079 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
6080 
6081 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6082 		reg |= CTRL_EXT_RO_DIS;
6083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6084 	}
6085 
6086 	/* Calculate (E)ITR value */
6087 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
6088 		/*
6089 		 * For NEWQUEUE's EITR (except for 82575).
6090 		 * 82575's EITR should be set same throttling value as other
6091 		 * old controllers' ITR because the interrupt/sec calculation
6092 		 * is the same, that is, 1,000,000,000 / (N * 256).
6093 		 *
6094 		 * 82574's EITR should be set same throttling value as ITR.
6095 		 *
6096 		 * For N interrupts/sec, set this value to:
6097 		 * 1,000,000 / N in contrast to ITR throttoling value.
6098 		 */
6099 		sc->sc_itr_init = 450;
6100 	} else if (sc->sc_type >= WM_T_82543) {
6101 		/*
6102 		 * Set up the interrupt throttling register (units of 256ns)
6103 		 * Note that a footnote in Intel's documentation says this
6104 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
6105 		 * or 10Mbit mode.  Empirically, it appears to be the case
6106 		 * that that is also true for the 1024ns units of the other
6107 		 * interrupt-related timer registers -- so, really, we ought
6108 		 * to divide this value by 4 when the link speed is low.
6109 		 *
6110 		 * XXX implement this division at link speed change!
6111 		 */
6112 
6113 		/*
6114 		 * For N interrupts/sec, set this value to:
6115 		 * 1,000,000,000 / (N * 256).  Note that we set the
6116 		 * absolute and packet timer values to this value
6117 		 * divided by 4 to get "simple timer" behavior.
6118 		 */
6119 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
6120 	}
6121 
6122 	error = wm_init_txrx_queues(sc);
6123 	if (error)
6124 		goto out;
6125 
6126 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
6127 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
6128 	    (sc->sc_type >= WM_T_82575))
6129 		wm_serdes_power_up_link_82575(sc);
6130 
6131 	/* Clear out the VLAN table -- we don't use it (yet). */
6132 	CSR_WRITE(sc, WMREG_VET, 0);
6133 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
6134 		trynum = 10; /* Due to hw errata */
6135 	else
6136 		trynum = 1;
6137 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
6138 		for (j = 0; j < trynum; j++)
6139 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
6140 
6141 	/*
6142 	 * Set up flow-control parameters.
6143 	 *
6144 	 * XXX Values could probably stand some tuning.
6145 	 */
6146 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
6147 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
6148 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
6149 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
6150 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
6151 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
6152 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
6153 	}
6154 
6155 	sc->sc_fcrtl = FCRTL_DFLT;
6156 	if (sc->sc_type < WM_T_82543) {
6157 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
6158 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
6159 	} else {
6160 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
6161 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
6162 	}
6163 
6164 	if (sc->sc_type == WM_T_80003)
6165 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
6166 	else
6167 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
6168 
6169 	/* Writes the control register. */
6170 	wm_set_vlan(sc);
6171 
6172 	if (sc->sc_flags & WM_F_HAS_MII) {
6173 		uint16_t kmreg;
6174 
6175 		switch (sc->sc_type) {
6176 		case WM_T_80003:
6177 		case WM_T_ICH8:
6178 		case WM_T_ICH9:
6179 		case WM_T_ICH10:
6180 		case WM_T_PCH:
6181 		case WM_T_PCH2:
6182 		case WM_T_PCH_LPT:
6183 		case WM_T_PCH_SPT:
6184 		case WM_T_PCH_CNP:
6185 			/*
6186 			 * Set the mac to wait the maximum time between each
6187 			 * iteration and increase the max iterations when
6188 			 * polling the phy; this fixes erroneous timeouts at
6189 			 * 10Mbps.
6190 			 */
6191 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
6192 			    0xFFFF);
6193 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6194 			    &kmreg);
6195 			kmreg |= 0x3F;
6196 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
6197 			    kmreg);
6198 			break;
6199 		default:
6200 			break;
6201 		}
6202 
6203 		if (sc->sc_type == WM_T_80003) {
6204 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6205 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
6206 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6207 
6208 			/* Bypass RX and TX FIFO's */
6209 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
6210 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
6211 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
6212 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
6213 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
6214 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
6215 		}
6216 	}
6217 #if 0
6218 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
6219 #endif
6220 
6221 	/* Set up checksum offload parameters. */
6222 	reg = CSR_READ(sc, WMREG_RXCSUM);
6223 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
6224 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
6225 		reg |= RXCSUM_IPOFL;
6226 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
6227 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
6228 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
6229 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
6230 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
6231 
6232 	/* Set registers about MSI-X */
6233 	if (wm_is_using_msix(sc)) {
6234 		uint32_t ivar, qintr_idx;
6235 		struct wm_queue *wmq;
6236 		unsigned int qid;
6237 
6238 		if (sc->sc_type == WM_T_82575) {
6239 			/* Interrupt control */
6240 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6241 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
6242 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6243 
6244 			/* TX and RX */
6245 			for (i = 0; i < sc->sc_nqueues; i++) {
6246 				wmq = &sc->sc_queue[i];
6247 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
6248 				    EITR_TX_QUEUE(wmq->wmq_id)
6249 				    | EITR_RX_QUEUE(wmq->wmq_id));
6250 			}
6251 			/* Link status */
6252 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
6253 			    EITR_OTHER);
6254 		} else if (sc->sc_type == WM_T_82574) {
6255 			/* Interrupt control */
6256 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
6257 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
6258 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6259 
6260 			/*
6261 			 * Workaround issue with spurious interrupts
6262 			 * in MSI-X mode.
6263 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
6264 			 * initialized yet. So re-initialize WMREG_RFCTL here.
6265 			 */
6266 			reg = CSR_READ(sc, WMREG_RFCTL);
6267 			reg |= WMREG_RFCTL_ACKDIS;
6268 			CSR_WRITE(sc, WMREG_RFCTL, reg);
6269 
6270 			ivar = 0;
6271 			/* TX and RX */
6272 			for (i = 0; i < sc->sc_nqueues; i++) {
6273 				wmq = &sc->sc_queue[i];
6274 				qid = wmq->wmq_id;
6275 				qintr_idx = wmq->wmq_intr_idx;
6276 
6277 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6278 				    IVAR_TX_MASK_Q_82574(qid));
6279 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
6280 				    IVAR_RX_MASK_Q_82574(qid));
6281 			}
6282 			/* Link status */
6283 			ivar |= __SHIFTIN((IVAR_VALID_82574
6284 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
6285 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
6286 		} else {
6287 			/* Interrupt control */
6288 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
6289 			    | GPIE_EIAME | GPIE_PBA);
6290 
6291 			switch (sc->sc_type) {
6292 			case WM_T_82580:
6293 			case WM_T_I350:
6294 			case WM_T_I354:
6295 			case WM_T_I210:
6296 			case WM_T_I211:
6297 				/* TX and RX */
6298 				for (i = 0; i < sc->sc_nqueues; i++) {
6299 					wmq = &sc->sc_queue[i];
6300 					qid = wmq->wmq_id;
6301 					qintr_idx = wmq->wmq_intr_idx;
6302 
6303 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
6304 					ivar &= ~IVAR_TX_MASK_Q(qid);
6305 					ivar |= __SHIFTIN((qintr_idx
6306 						| IVAR_VALID),
6307 					    IVAR_TX_MASK_Q(qid));
6308 					ivar &= ~IVAR_RX_MASK_Q(qid);
6309 					ivar |= __SHIFTIN((qintr_idx
6310 						| IVAR_VALID),
6311 					    IVAR_RX_MASK_Q(qid));
6312 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
6313 				}
6314 				break;
6315 			case WM_T_82576:
6316 				/* TX and RX */
6317 				for (i = 0; i < sc->sc_nqueues; i++) {
6318 					wmq = &sc->sc_queue[i];
6319 					qid = wmq->wmq_id;
6320 					qintr_idx = wmq->wmq_intr_idx;
6321 
6322 					ivar = CSR_READ(sc,
6323 					    WMREG_IVAR_Q_82576(qid));
6324 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
6325 					ivar |= __SHIFTIN((qintr_idx
6326 						| IVAR_VALID),
6327 					    IVAR_TX_MASK_Q_82576(qid));
6328 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
6329 					ivar |= __SHIFTIN((qintr_idx
6330 						| IVAR_VALID),
6331 					    IVAR_RX_MASK_Q_82576(qid));
6332 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
6333 					    ivar);
6334 				}
6335 				break;
6336 			default:
6337 				break;
6338 			}
6339 
6340 			/* Link status */
6341 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
6342 			    IVAR_MISC_OTHER);
6343 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
6344 		}
6345 
6346 		if (wm_is_using_multiqueue(sc)) {
6347 			wm_init_rss(sc);
6348 
6349 			/*
6350 			** NOTE: Receive Full-Packet Checksum Offload
6351 			** is mutually exclusive with Multiqueue. However
6352 			** this is not the same as TCP/IP checksums which
6353 			** still work.
6354 			*/
6355 			reg = CSR_READ(sc, WMREG_RXCSUM);
6356 			reg |= RXCSUM_PCSD;
6357 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
6358 		}
6359 	}
6360 
6361 	/* Set up the interrupt registers. */
6362 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6363 
6364 	/* Enable SFP module insertion interrupt if it's required */
6365 	if ((sc->sc_flags & WM_F_SFP) != 0) {
6366 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
6367 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6368 		sfp_mask = ICR_GPI(0);
6369 	}
6370 
6371 	if (wm_is_using_msix(sc)) {
6372 		uint32_t mask;
6373 		struct wm_queue *wmq;
6374 
6375 		switch (sc->sc_type) {
6376 		case WM_T_82574:
6377 			mask = 0;
6378 			for (i = 0; i < sc->sc_nqueues; i++) {
6379 				wmq = &sc->sc_queue[i];
6380 				mask |= ICR_TXQ(wmq->wmq_id);
6381 				mask |= ICR_RXQ(wmq->wmq_id);
6382 			}
6383 			mask |= ICR_OTHER;
6384 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
6385 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
6386 			break;
6387 		default:
6388 			if (sc->sc_type == WM_T_82575) {
6389 				mask = 0;
6390 				for (i = 0; i < sc->sc_nqueues; i++) {
6391 					wmq = &sc->sc_queue[i];
6392 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
6393 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
6394 				}
6395 				mask |= EITR_OTHER;
6396 			} else {
6397 				mask = 0;
6398 				for (i = 0; i < sc->sc_nqueues; i++) {
6399 					wmq = &sc->sc_queue[i];
6400 					mask |= 1 << wmq->wmq_intr_idx;
6401 				}
6402 				mask |= 1 << sc->sc_link_intr_idx;
6403 			}
6404 			CSR_WRITE(sc, WMREG_EIAC, mask);
6405 			CSR_WRITE(sc, WMREG_EIAM, mask);
6406 			CSR_WRITE(sc, WMREG_EIMS, mask);
6407 
6408 			/* For other interrupts */
6409 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
6410 			break;
6411 		}
6412 	} else {
6413 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
6414 		    ICR_RXO | ICR_RXT0 | sfp_mask;
6415 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
6416 	}
6417 
6418 	/* Set up the inter-packet gap. */
6419 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6420 
6421 	if (sc->sc_type >= WM_T_82543) {
6422 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6423 			struct wm_queue *wmq = &sc->sc_queue[qidx];
6424 			wm_itrs_writereg(sc, wmq);
6425 		}
6426 		/*
6427 		 * Link interrupts occur much less than TX
6428 		 * interrupts and RX interrupts. So, we don't
6429 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
6430 		 * FreeBSD's if_igb.
6431 		 */
6432 	}
6433 
6434 	/* Set the VLAN ethernetype. */
6435 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
6436 
6437 	/*
6438 	 * Set up the transmit control register; we start out with
6439 	 * a collision distance suitable for FDX, but update it whe
6440 	 * we resolve the media type.
6441 	 */
6442 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
6443 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
6444 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6445 	if (sc->sc_type >= WM_T_82571)
6446 		sc->sc_tctl |= TCTL_MULR;
6447 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6448 
6449 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6450 		/* Write TDT after TCTL.EN is set. See the document. */
6451 		CSR_WRITE(sc, WMREG_TDT(0), 0);
6452 	}
6453 
6454 	if (sc->sc_type == WM_T_80003) {
6455 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
6456 		reg &= ~TCTL_EXT_GCEX_MASK;
6457 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
6458 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
6459 	}
6460 
6461 	/* Set the media. */
6462 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
6463 		goto out;
6464 
6465 	/* Configure for OS presence */
6466 	wm_init_manageability(sc);
6467 
6468 	/*
6469 	 * Set up the receive control register; we actually program the
6470 	 * register when we set the receive filter. Use multicast address
6471 	 * offset type 0.
6472 	 *
6473 	 * Only the i82544 has the ability to strip the incoming CRC, so we
6474 	 * don't enable that feature.
6475 	 */
6476 	sc->sc_mchash_type = 0;
6477 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
6478 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
6479 
6480 	/* 82574 use one buffer extended Rx descriptor. */
6481 	if (sc->sc_type == WM_T_82574)
6482 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
6483 
6484 	if ((sc->sc_flags & WM_F_CRC_STRIP) != 0)
6485 		sc->sc_rctl |= RCTL_SECRC;
6486 
6487 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
6488 	    && (ifp->if_mtu > ETHERMTU)) {
6489 		sc->sc_rctl |= RCTL_LPE;
6490 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6491 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
6492 	}
6493 
6494 	if (MCLBYTES == 2048)
6495 		sc->sc_rctl |= RCTL_2k;
6496 	else {
6497 		if (sc->sc_type >= WM_T_82543) {
6498 			switch (MCLBYTES) {
6499 			case 4096:
6500 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
6501 				break;
6502 			case 8192:
6503 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
6504 				break;
6505 			case 16384:
6506 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
6507 				break;
6508 			default:
6509 				panic("wm_init: MCLBYTES %d unsupported",
6510 				    MCLBYTES);
6511 				break;
6512 			}
6513 		} else
6514 			panic("wm_init: i82542 requires MCLBYTES = 2048");
6515 	}
6516 
6517 	/* Enable ECC */
6518 	switch (sc->sc_type) {
6519 	case WM_T_82571:
6520 		reg = CSR_READ(sc, WMREG_PBA_ECC);
6521 		reg |= PBA_ECC_CORR_EN;
6522 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
6523 		break;
6524 	case WM_T_PCH_LPT:
6525 	case WM_T_PCH_SPT:
6526 	case WM_T_PCH_CNP:
6527 		reg = CSR_READ(sc, WMREG_PBECCSTS);
6528 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
6529 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
6530 
6531 		sc->sc_ctrl |= CTRL_MEHE;
6532 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6533 		break;
6534 	default:
6535 		break;
6536 	}
6537 
6538 	/*
6539 	 * Set the receive filter.
6540 	 *
6541 	 * For 82575 and 82576, the RX descriptors must be initialized after
6542 	 * the setting of RCTL.EN in wm_set_filter()
6543 	 */
6544 	wm_set_filter(sc);
6545 
6546 	/* On 575 and later set RDT only if RX enabled */
6547 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6548 		int qidx;
6549 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6550 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
6551 			for (i = 0; i < WM_NRXDESC; i++) {
6552 				mutex_enter(rxq->rxq_lock);
6553 				wm_init_rxdesc(rxq, i);
6554 				mutex_exit(rxq->rxq_lock);
6555 
6556 			}
6557 		}
6558 	}
6559 
6560 	wm_unset_stopping_flags(sc);
6561 
6562 	/* Start the one second link check clock. */
6563 	callout_schedule(&sc->sc_tick_ch, hz);
6564 
6565 	/* ...all done! */
6566 	ifp->if_flags |= IFF_RUNNING;
6567 
6568  out:
6569 	/* Save last flags for the callback */
6570 	sc->sc_if_flags = ifp->if_flags;
6571 	sc->sc_ec_capenable = ec->ec_capenable;
6572 	if (error)
6573 		log(LOG_ERR, "%s: interface not running\n",
6574 		    device_xname(sc->sc_dev));
6575 	return error;
6576 }
6577 
6578 /*
6579  * wm_stop:		[ifnet interface function]
6580  *
6581  *	Stop transmission on the interface.
6582  */
6583 static void
6584 wm_stop(struct ifnet *ifp, int disable)
6585 {
6586 	struct wm_softc *sc = ifp->if_softc;
6587 
6588 	ASSERT_SLEEPABLE();
6589 
6590 	WM_CORE_LOCK(sc);
6591 	wm_stop_locked(ifp, disable ? true : false, true);
6592 	WM_CORE_UNLOCK(sc);
6593 
6594 	/*
6595 	 * After wm_set_stopping_flags(), it is guaranteed
6596 	 * wm_handle_queue_work() does not call workqueue_enqueue().
6597 	 * However, workqueue_wait() cannot call in wm_stop_locked()
6598 	 * because it can sleep...
6599 	 * so, call workqueue_wait() here.
6600 	 */
6601 	for (int i = 0; i < sc->sc_nqueues; i++)
6602 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
6603 }
6604 
6605 static void
6606 wm_stop_locked(struct ifnet *ifp, bool disable, bool wait)
6607 {
6608 	struct wm_softc *sc = ifp->if_softc;
6609 	struct wm_txsoft *txs;
6610 	int i, qidx;
6611 
6612 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
6613 		device_xname(sc->sc_dev), __func__));
6614 	KASSERT(WM_CORE_LOCKED(sc));
6615 
6616 	wm_set_stopping_flags(sc);
6617 
6618 	if (sc->sc_flags & WM_F_HAS_MII) {
6619 		/* Down the MII. */
6620 		mii_down(&sc->sc_mii);
6621 	} else {
6622 #if 0
6623 		/* Should we clear PHY's status properly? */
6624 		wm_reset(sc);
6625 #endif
6626 	}
6627 
6628 	/* Stop the transmit and receive processes. */
6629 	CSR_WRITE(sc, WMREG_TCTL, 0);
6630 	CSR_WRITE(sc, WMREG_RCTL, 0);
6631 	sc->sc_rctl &= ~RCTL_EN;
6632 
6633 	/*
6634 	 * Clear the interrupt mask to ensure the device cannot assert its
6635 	 * interrupt line.
6636 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
6637 	 * service any currently pending or shared interrupt.
6638 	 */
6639 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6640 	sc->sc_icr = 0;
6641 	if (wm_is_using_msix(sc)) {
6642 		if (sc->sc_type != WM_T_82574) {
6643 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6644 			CSR_WRITE(sc, WMREG_EIAC, 0);
6645 		} else
6646 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6647 	}
6648 
6649 	/*
6650 	 * Stop callouts after interrupts are disabled; if we have
6651 	 * to wait for them, we will be releasing the CORE_LOCK
6652 	 * briefly, which will unblock interrupts on the current CPU.
6653 	 */
6654 
6655 	/* Stop the one second clock. */
6656 	if (wait)
6657 		callout_halt(&sc->sc_tick_ch, sc->sc_core_lock);
6658 	else
6659 		callout_stop(&sc->sc_tick_ch);
6660 
6661 	/* Stop the 82547 Tx FIFO stall check timer. */
6662 	if (sc->sc_type == WM_T_82547) {
6663 		if (wait)
6664 			callout_halt(&sc->sc_txfifo_ch, sc->sc_core_lock);
6665 		else
6666 			callout_stop(&sc->sc_txfifo_ch);
6667 	}
6668 
6669 	/* Release any queued transmit buffers. */
6670 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6671 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6672 		struct wm_txqueue *txq = &wmq->wmq_txq;
6673 		struct mbuf *m;
6674 
6675 		mutex_enter(txq->txq_lock);
6676 		txq->txq_sending = false; /* Ensure watchdog disabled */
6677 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6678 			txs = &txq->txq_soft[i];
6679 			if (txs->txs_mbuf != NULL) {
6680 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6681 				m_freem(txs->txs_mbuf);
6682 				txs->txs_mbuf = NULL;
6683 			}
6684 		}
6685 		/* Drain txq_interq */
6686 		while ((m = pcq_get(txq->txq_interq)) != NULL)
6687 			m_freem(m);
6688 		mutex_exit(txq->txq_lock);
6689 	}
6690 
6691 	/* Mark the interface as down and cancel the watchdog timer. */
6692 	ifp->if_flags &= ~IFF_RUNNING;
6693 
6694 	if (disable) {
6695 		for (i = 0; i < sc->sc_nqueues; i++) {
6696 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6697 			mutex_enter(rxq->rxq_lock);
6698 			wm_rxdrain(rxq);
6699 			mutex_exit(rxq->rxq_lock);
6700 		}
6701 	}
6702 
6703 #if 0 /* notyet */
6704 	if (sc->sc_type >= WM_T_82544)
6705 		CSR_WRITE(sc, WMREG_WUC, 0);
6706 #endif
6707 }
6708 
6709 static void
6710 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6711 {
6712 	struct mbuf *m;
6713 	int i;
6714 
6715 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6716 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6717 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6718 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6719 		    m->m_data, m->m_len, m->m_flags);
6720 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6721 	    i, i == 1 ? "" : "s");
6722 }
6723 
6724 /*
6725  * wm_82547_txfifo_stall:
6726  *
6727  *	Callout used to wait for the 82547 Tx FIFO to drain,
6728  *	reset the FIFO pointers, and restart packet transmission.
6729  */
6730 static void
6731 wm_82547_txfifo_stall(void *arg)
6732 {
6733 	struct wm_softc *sc = arg;
6734 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6735 
6736 	mutex_enter(txq->txq_lock);
6737 
6738 	if (txq->txq_stopping)
6739 		goto out;
6740 
6741 	if (txq->txq_fifo_stall) {
6742 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6743 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6744 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6745 			/*
6746 			 * Packets have drained.  Stop transmitter, reset
6747 			 * FIFO pointers, restart transmitter, and kick
6748 			 * the packet queue.
6749 			 */
6750 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6751 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6752 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6753 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6754 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6755 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6756 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6757 			CSR_WRITE_FLUSH(sc);
6758 
6759 			txq->txq_fifo_head = 0;
6760 			txq->txq_fifo_stall = 0;
6761 			wm_start_locked(&sc->sc_ethercom.ec_if);
6762 		} else {
6763 			/*
6764 			 * Still waiting for packets to drain; try again in
6765 			 * another tick.
6766 			 */
6767 			callout_schedule(&sc->sc_txfifo_ch, 1);
6768 		}
6769 	}
6770 
6771 out:
6772 	mutex_exit(txq->txq_lock);
6773 }
6774 
6775 /*
6776  * wm_82547_txfifo_bugchk:
6777  *
6778  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6779  *	prevent enqueueing a packet that would wrap around the end
6780  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
6781  *
6782  *	We do this by checking the amount of space before the end
6783  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
6784  *	the Tx FIFO, wait for all remaining packets to drain, reset
6785  *	the internal FIFO pointers to the beginning, and restart
6786  *	transmission on the interface.
6787  */
6788 #define	WM_FIFO_HDR		0x10
6789 #define	WM_82547_PAD_LEN	0x3e0
6790 static int
6791 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6792 {
6793 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6794 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6795 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6796 
6797 	/* Just return if already stalled. */
6798 	if (txq->txq_fifo_stall)
6799 		return 1;
6800 
6801 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6802 		/* Stall only occurs in half-duplex mode. */
6803 		goto send_packet;
6804 	}
6805 
6806 	if (len >= WM_82547_PAD_LEN + space) {
6807 		txq->txq_fifo_stall = 1;
6808 		callout_schedule(&sc->sc_txfifo_ch, 1);
6809 		return 1;
6810 	}
6811 
6812  send_packet:
6813 	txq->txq_fifo_head += len;
6814 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6815 		txq->txq_fifo_head -= txq->txq_fifo_size;
6816 
6817 	return 0;
6818 }
6819 
6820 static int
6821 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6822 {
6823 	int error;
6824 
6825 	/*
6826 	 * Allocate the control data structures, and create and load the
6827 	 * DMA map for it.
6828 	 *
6829 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6830 	 * memory.  So must Rx descriptors.  We simplify by allocating
6831 	 * both sets within the same 4G segment.
6832 	 */
6833 	if (sc->sc_type < WM_T_82544)
6834 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6835 	else
6836 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6837 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6838 		txq->txq_descsize = sizeof(nq_txdesc_t);
6839 	else
6840 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6841 
6842 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6843 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6844 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6845 		aprint_error_dev(sc->sc_dev,
6846 		    "unable to allocate TX control data, error = %d\n",
6847 		    error);
6848 		goto fail_0;
6849 	}
6850 
6851 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6852 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6853 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6854 		aprint_error_dev(sc->sc_dev,
6855 		    "unable to map TX control data, error = %d\n", error);
6856 		goto fail_1;
6857 	}
6858 
6859 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6860 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6861 		aprint_error_dev(sc->sc_dev,
6862 		    "unable to create TX control data DMA map, error = %d\n",
6863 		    error);
6864 		goto fail_2;
6865 	}
6866 
6867 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6868 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6869 		aprint_error_dev(sc->sc_dev,
6870 		    "unable to load TX control data DMA map, error = %d\n",
6871 		    error);
6872 		goto fail_3;
6873 	}
6874 
6875 	return 0;
6876 
6877  fail_3:
6878 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6879  fail_2:
6880 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6881 	    WM_TXDESCS_SIZE(txq));
6882  fail_1:
6883 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6884  fail_0:
6885 	return error;
6886 }
6887 
6888 static void
6889 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6890 {
6891 
6892 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6893 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6894 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6895 	    WM_TXDESCS_SIZE(txq));
6896 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6897 }
6898 
6899 static int
6900 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6901 {
6902 	int error;
6903 	size_t rxq_descs_size;
6904 
6905 	/*
6906 	 * Allocate the control data structures, and create and load the
6907 	 * DMA map for it.
6908 	 *
6909 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6910 	 * memory.  So must Rx descriptors.  We simplify by allocating
6911 	 * both sets within the same 4G segment.
6912 	 */
6913 	rxq->rxq_ndesc = WM_NRXDESC;
6914 	if (sc->sc_type == WM_T_82574)
6915 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6916 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6917 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6918 	else
6919 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6920 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6921 
6922 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6923 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6924 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6925 		aprint_error_dev(sc->sc_dev,
6926 		    "unable to allocate RX control data, error = %d\n",
6927 		    error);
6928 		goto fail_0;
6929 	}
6930 
6931 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6932 		    rxq->rxq_desc_rseg, rxq_descs_size,
6933 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6934 		aprint_error_dev(sc->sc_dev,
6935 		    "unable to map RX control data, error = %d\n", error);
6936 		goto fail_1;
6937 	}
6938 
6939 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6940 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6941 		aprint_error_dev(sc->sc_dev,
6942 		    "unable to create RX control data DMA map, error = %d\n",
6943 		    error);
6944 		goto fail_2;
6945 	}
6946 
6947 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6948 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6949 		aprint_error_dev(sc->sc_dev,
6950 		    "unable to load RX control data DMA map, error = %d\n",
6951 		    error);
6952 		goto fail_3;
6953 	}
6954 
6955 	return 0;
6956 
6957  fail_3:
6958 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6959  fail_2:
6960 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6961 	    rxq_descs_size);
6962  fail_1:
6963 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6964  fail_0:
6965 	return error;
6966 }
6967 
6968 static void
6969 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6970 {
6971 
6972 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6973 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6974 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6975 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6976 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6977 }
6978 
6979 
6980 static int
6981 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6982 {
6983 	int i, error;
6984 
6985 	/* Create the transmit buffer DMA maps. */
6986 	WM_TXQUEUELEN(txq) =
6987 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6988 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6989 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6990 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6991 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6992 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6993 			aprint_error_dev(sc->sc_dev,
6994 			    "unable to create Tx DMA map %d, error = %d\n",
6995 			    i, error);
6996 			goto fail;
6997 		}
6998 	}
6999 
7000 	return 0;
7001 
7002  fail:
7003 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7004 		if (txq->txq_soft[i].txs_dmamap != NULL)
7005 			bus_dmamap_destroy(sc->sc_dmat,
7006 			    txq->txq_soft[i].txs_dmamap);
7007 	}
7008 	return error;
7009 }
7010 
7011 static void
7012 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
7013 {
7014 	int i;
7015 
7016 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7017 		if (txq->txq_soft[i].txs_dmamap != NULL)
7018 			bus_dmamap_destroy(sc->sc_dmat,
7019 			    txq->txq_soft[i].txs_dmamap);
7020 	}
7021 }
7022 
7023 static int
7024 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7025 {
7026 	int i, error;
7027 
7028 	/* Create the receive buffer DMA maps. */
7029 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7030 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
7031 			    MCLBYTES, 0, 0,
7032 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
7033 			aprint_error_dev(sc->sc_dev,
7034 			    "unable to create Rx DMA map %d error = %d\n",
7035 			    i, error);
7036 			goto fail;
7037 		}
7038 		rxq->rxq_soft[i].rxs_mbuf = NULL;
7039 	}
7040 
7041 	return 0;
7042 
7043  fail:
7044 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7045 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7046 			bus_dmamap_destroy(sc->sc_dmat,
7047 			    rxq->rxq_soft[i].rxs_dmamap);
7048 	}
7049 	return error;
7050 }
7051 
7052 static void
7053 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7054 {
7055 	int i;
7056 
7057 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7058 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
7059 			bus_dmamap_destroy(sc->sc_dmat,
7060 			    rxq->rxq_soft[i].rxs_dmamap);
7061 	}
7062 }
7063 
7064 /*
7065  * wm_alloc_quques:
7066  *	Allocate {tx,rx}descs and {tx,rx} buffers
7067  */
7068 static int
7069 wm_alloc_txrx_queues(struct wm_softc *sc)
7070 {
7071 	int i, error, tx_done, rx_done;
7072 
7073 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
7074 	    KM_SLEEP);
7075 	if (sc->sc_queue == NULL) {
7076 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
7077 		error = ENOMEM;
7078 		goto fail_0;
7079 	}
7080 
7081 	/* For transmission */
7082 	error = 0;
7083 	tx_done = 0;
7084 	for (i = 0; i < sc->sc_nqueues; i++) {
7085 #ifdef WM_EVENT_COUNTERS
7086 		int j;
7087 		const char *xname;
7088 #endif
7089 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7090 		txq->txq_sc = sc;
7091 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7092 
7093 		error = wm_alloc_tx_descs(sc, txq);
7094 		if (error)
7095 			break;
7096 		error = wm_alloc_tx_buffer(sc, txq);
7097 		if (error) {
7098 			wm_free_tx_descs(sc, txq);
7099 			break;
7100 		}
7101 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
7102 		if (txq->txq_interq == NULL) {
7103 			wm_free_tx_descs(sc, txq);
7104 			wm_free_tx_buffer(sc, txq);
7105 			error = ENOMEM;
7106 			break;
7107 		}
7108 
7109 #ifdef WM_EVENT_COUNTERS
7110 		xname = device_xname(sc->sc_dev);
7111 
7112 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
7113 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
7114 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
7115 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
7116 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
7117 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
7118 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
7119 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
7120 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
7121 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
7122 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
7123 
7124 		for (j = 0; j < WM_NTXSEGS; j++) {
7125 			snprintf(txq->txq_txseg_evcnt_names[j],
7126 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
7127 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
7128 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
7129 		}
7130 
7131 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
7132 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
7133 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
7134 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
7135 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
7136 		WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
7137 #endif /* WM_EVENT_COUNTERS */
7138 
7139 		tx_done++;
7140 	}
7141 	if (error)
7142 		goto fail_1;
7143 
7144 	/* For receive */
7145 	error = 0;
7146 	rx_done = 0;
7147 	for (i = 0; i < sc->sc_nqueues; i++) {
7148 #ifdef WM_EVENT_COUNTERS
7149 		const char *xname;
7150 #endif
7151 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7152 		rxq->rxq_sc = sc;
7153 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
7154 
7155 		error = wm_alloc_rx_descs(sc, rxq);
7156 		if (error)
7157 			break;
7158 
7159 		error = wm_alloc_rx_buffer(sc, rxq);
7160 		if (error) {
7161 			wm_free_rx_descs(sc, rxq);
7162 			break;
7163 		}
7164 
7165 #ifdef WM_EVENT_COUNTERS
7166 		xname = device_xname(sc->sc_dev);
7167 
7168 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
7169 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
7170 
7171 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
7172 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
7173 #endif /* WM_EVENT_COUNTERS */
7174 
7175 		rx_done++;
7176 	}
7177 	if (error)
7178 		goto fail_2;
7179 
7180 	return 0;
7181 
7182  fail_2:
7183 	for (i = 0; i < rx_done; i++) {
7184 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7185 		wm_free_rx_buffer(sc, rxq);
7186 		wm_free_rx_descs(sc, rxq);
7187 		if (rxq->rxq_lock)
7188 			mutex_obj_free(rxq->rxq_lock);
7189 	}
7190  fail_1:
7191 	for (i = 0; i < tx_done; i++) {
7192 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7193 		pcq_destroy(txq->txq_interq);
7194 		wm_free_tx_buffer(sc, txq);
7195 		wm_free_tx_descs(sc, txq);
7196 		if (txq->txq_lock)
7197 			mutex_obj_free(txq->txq_lock);
7198 	}
7199 
7200 	kmem_free(sc->sc_queue,
7201 	    sizeof(struct wm_queue) * sc->sc_nqueues);
7202  fail_0:
7203 	return error;
7204 }
7205 
7206 /*
7207  * wm_free_quques:
7208  *	Free {tx,rx}descs and {tx,rx} buffers
7209  */
7210 static void
7211 wm_free_txrx_queues(struct wm_softc *sc)
7212 {
7213 	int i;
7214 
7215 	for (i = 0; i < sc->sc_nqueues; i++) {
7216 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
7217 
7218 #ifdef WM_EVENT_COUNTERS
7219 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
7220 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
7221 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
7222 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
7223 #endif /* WM_EVENT_COUNTERS */
7224 
7225 		wm_free_rx_buffer(sc, rxq);
7226 		wm_free_rx_descs(sc, rxq);
7227 		if (rxq->rxq_lock)
7228 			mutex_obj_free(rxq->rxq_lock);
7229 	}
7230 
7231 	for (i = 0; i < sc->sc_nqueues; i++) {
7232 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
7233 		struct mbuf *m;
7234 #ifdef WM_EVENT_COUNTERS
7235 		int j;
7236 
7237 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
7238 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
7239 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
7240 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
7241 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
7242 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
7243 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
7244 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
7245 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
7246 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
7247 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
7248 
7249 		for (j = 0; j < WM_NTXSEGS; j++)
7250 			evcnt_detach(&txq->txq_ev_txseg[j]);
7251 
7252 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
7253 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
7254 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
7255 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
7256 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
7257 		WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
7258 #endif /* WM_EVENT_COUNTERS */
7259 
7260 		/* Drain txq_interq */
7261 		while ((m = pcq_get(txq->txq_interq)) != NULL)
7262 			m_freem(m);
7263 		pcq_destroy(txq->txq_interq);
7264 
7265 		wm_free_tx_buffer(sc, txq);
7266 		wm_free_tx_descs(sc, txq);
7267 		if (txq->txq_lock)
7268 			mutex_obj_free(txq->txq_lock);
7269 	}
7270 
7271 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
7272 }
7273 
7274 static void
7275 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7276 {
7277 
7278 	KASSERT(mutex_owned(txq->txq_lock));
7279 
7280 	/* Initialize the transmit descriptor ring. */
7281 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
7282 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
7283 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7284 	txq->txq_free = WM_NTXDESC(txq);
7285 	txq->txq_next = 0;
7286 }
7287 
7288 static void
7289 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7290     struct wm_txqueue *txq)
7291 {
7292 
7293 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7294 		device_xname(sc->sc_dev), __func__));
7295 	KASSERT(mutex_owned(txq->txq_lock));
7296 
7297 	if (sc->sc_type < WM_T_82543) {
7298 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
7299 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
7300 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
7301 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
7302 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
7303 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
7304 	} else {
7305 		int qid = wmq->wmq_id;
7306 
7307 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
7308 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
7309 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
7310 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
7311 
7312 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7313 			/*
7314 			 * Don't write TDT before TCTL.EN is set.
7315 			 * See the document.
7316 			 */
7317 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
7318 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
7319 			    | TXDCTL_WTHRESH(0));
7320 		else {
7321 			/* XXX should update with AIM? */
7322 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
7323 			if (sc->sc_type >= WM_T_82540) {
7324 				/* Should be the same */
7325 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
7326 			}
7327 
7328 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
7329 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
7330 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
7331 		}
7332 	}
7333 }
7334 
7335 static void
7336 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
7337 {
7338 	int i;
7339 
7340 	KASSERT(mutex_owned(txq->txq_lock));
7341 
7342 	/* Initialize the transmit job descriptors. */
7343 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
7344 		txq->txq_soft[i].txs_mbuf = NULL;
7345 	txq->txq_sfree = WM_TXQUEUELEN(txq);
7346 	txq->txq_snext = 0;
7347 	txq->txq_sdirty = 0;
7348 }
7349 
7350 static void
7351 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7352     struct wm_txqueue *txq)
7353 {
7354 
7355 	KASSERT(mutex_owned(txq->txq_lock));
7356 
7357 	/*
7358 	 * Set up some register offsets that are different between
7359 	 * the i82542 and the i82543 and later chips.
7360 	 */
7361 	if (sc->sc_type < WM_T_82543)
7362 		txq->txq_tdt_reg = WMREG_OLD_TDT;
7363 	else
7364 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
7365 
7366 	wm_init_tx_descs(sc, txq);
7367 	wm_init_tx_regs(sc, wmq, txq);
7368 	wm_init_tx_buffer(sc, txq);
7369 
7370 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
7371 	txq->txq_sending = false;
7372 }
7373 
7374 static void
7375 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
7376     struct wm_rxqueue *rxq)
7377 {
7378 
7379 	KASSERT(mutex_owned(rxq->rxq_lock));
7380 
7381 	/*
7382 	 * Initialize the receive descriptor and receive job
7383 	 * descriptor rings.
7384 	 */
7385 	if (sc->sc_type < WM_T_82543) {
7386 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
7387 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
7388 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
7389 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7390 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
7391 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
7392 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
7393 
7394 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
7395 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
7396 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
7397 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
7398 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
7399 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
7400 	} else {
7401 		int qid = wmq->wmq_id;
7402 
7403 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
7404 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
7405 		CSR_WRITE(sc, WMREG_RDLEN(qid),
7406 		    rxq->rxq_descsize * rxq->rxq_ndesc);
7407 
7408 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7409 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
7410 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
7411 
7412 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
7413 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
7414 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
7415 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
7416 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
7417 			    | RXDCTL_WTHRESH(1));
7418 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7419 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7420 		} else {
7421 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
7422 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
7423 			/* XXX should update with AIM? */
7424 			CSR_WRITE(sc, WMREG_RDTR,
7425 			    (wmq->wmq_itr / 4) | RDTR_FPD);
7426 			/* MUST be same */
7427 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
7428 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
7429 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
7430 		}
7431 	}
7432 }
7433 
7434 static int
7435 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
7436 {
7437 	struct wm_rxsoft *rxs;
7438 	int error, i;
7439 
7440 	KASSERT(mutex_owned(rxq->rxq_lock));
7441 
7442 	for (i = 0; i < rxq->rxq_ndesc; i++) {
7443 		rxs = &rxq->rxq_soft[i];
7444 		if (rxs->rxs_mbuf == NULL) {
7445 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
7446 				log(LOG_ERR, "%s: unable to allocate or map "
7447 				    "rx buffer %d, error = %d\n",
7448 				    device_xname(sc->sc_dev), i, error);
7449 				/*
7450 				 * XXX Should attempt to run with fewer receive
7451 				 * XXX buffers instead of just failing.
7452 				 */
7453 				wm_rxdrain(rxq);
7454 				return ENOMEM;
7455 			}
7456 		} else {
7457 			/*
7458 			 * For 82575 and 82576, the RX descriptors must be
7459 			 * initialized after the setting of RCTL.EN in
7460 			 * wm_set_filter()
7461 			 */
7462 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
7463 				wm_init_rxdesc(rxq, i);
7464 		}
7465 	}
7466 	rxq->rxq_ptr = 0;
7467 	rxq->rxq_discard = 0;
7468 	WM_RXCHAIN_RESET(rxq);
7469 
7470 	return 0;
7471 }
7472 
7473 static int
7474 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
7475     struct wm_rxqueue *rxq)
7476 {
7477 
7478 	KASSERT(mutex_owned(rxq->rxq_lock));
7479 
7480 	/*
7481 	 * Set up some register offsets that are different between
7482 	 * the i82542 and the i82543 and later chips.
7483 	 */
7484 	if (sc->sc_type < WM_T_82543)
7485 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
7486 	else
7487 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
7488 
7489 	wm_init_rx_regs(sc, wmq, rxq);
7490 	return wm_init_rx_buffer(sc, rxq);
7491 }
7492 
7493 /*
7494  * wm_init_quques:
7495  *	Initialize {tx,rx}descs and {tx,rx} buffers
7496  */
7497 static int
7498 wm_init_txrx_queues(struct wm_softc *sc)
7499 {
7500 	int i, error = 0;
7501 
7502 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
7503 		device_xname(sc->sc_dev), __func__));
7504 
7505 	for (i = 0; i < sc->sc_nqueues; i++) {
7506 		struct wm_queue *wmq = &sc->sc_queue[i];
7507 		struct wm_txqueue *txq = &wmq->wmq_txq;
7508 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7509 
7510 		/*
7511 		 * TODO
7512 		 * Currently, use constant variable instead of AIM.
7513 		 * Furthermore, the interrupt interval of multiqueue which use
7514 		 * polling mode is less than default value.
7515 		 * More tuning and AIM are required.
7516 		 */
7517 		if (wm_is_using_multiqueue(sc))
7518 			wmq->wmq_itr = 50;
7519 		else
7520 			wmq->wmq_itr = sc->sc_itr_init;
7521 		wmq->wmq_set_itr = true;
7522 
7523 		mutex_enter(txq->txq_lock);
7524 		wm_init_tx_queue(sc, wmq, txq);
7525 		mutex_exit(txq->txq_lock);
7526 
7527 		mutex_enter(rxq->rxq_lock);
7528 		error = wm_init_rx_queue(sc, wmq, rxq);
7529 		mutex_exit(rxq->rxq_lock);
7530 		if (error)
7531 			break;
7532 	}
7533 
7534 	return error;
7535 }
7536 
7537 /*
7538  * wm_tx_offload:
7539  *
7540  *	Set up TCP/IP checksumming parameters for the
7541  *	specified packet.
7542  */
7543 static void
7544 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7545     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
7546 {
7547 	struct mbuf *m0 = txs->txs_mbuf;
7548 	struct livengood_tcpip_ctxdesc *t;
7549 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
7550 	uint32_t ipcse;
7551 	struct ether_header *eh;
7552 	int offset, iphl;
7553 	uint8_t fields;
7554 
7555 	/*
7556 	 * XXX It would be nice if the mbuf pkthdr had offset
7557 	 * fields for the protocol headers.
7558 	 */
7559 
7560 	eh = mtod(m0, struct ether_header *);
7561 	switch (htons(eh->ether_type)) {
7562 	case ETHERTYPE_IP:
7563 	case ETHERTYPE_IPV6:
7564 		offset = ETHER_HDR_LEN;
7565 		break;
7566 
7567 	case ETHERTYPE_VLAN:
7568 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7569 		break;
7570 
7571 	default:
7572 		/* Don't support this protocol or encapsulation. */
7573 		txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
7574 		txq->txq_last_hw_ipcs = 0;
7575 		txq->txq_last_hw_tucs = 0;
7576 		*fieldsp = 0;
7577 		*cmdp = 0;
7578 		return;
7579 	}
7580 
7581 	if ((m0->m_pkthdr.csum_flags &
7582 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7583 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7584 	} else
7585 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7586 
7587 	ipcse = offset + iphl - 1;
7588 
7589 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
7590 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
7591 	seg = 0;
7592 	fields = 0;
7593 
7594 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7595 		int hlen = offset + iphl;
7596 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7597 
7598 		if (__predict_false(m0->m_len <
7599 				    (hlen + sizeof(struct tcphdr)))) {
7600 			/*
7601 			 * TCP/IP headers are not in the first mbuf; we need
7602 			 * to do this the slow and painful way. Let's just
7603 			 * hope this doesn't happen very often.
7604 			 */
7605 			struct tcphdr th;
7606 
7607 			WM_Q_EVCNT_INCR(txq, tsopain);
7608 
7609 			m_copydata(m0, hlen, sizeof(th), &th);
7610 			if (v4) {
7611 				struct ip ip;
7612 
7613 				m_copydata(m0, offset, sizeof(ip), &ip);
7614 				ip.ip_len = 0;
7615 				m_copyback(m0,
7616 				    offset + offsetof(struct ip, ip_len),
7617 				    sizeof(ip.ip_len), &ip.ip_len);
7618 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7619 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7620 			} else {
7621 				struct ip6_hdr ip6;
7622 
7623 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7624 				ip6.ip6_plen = 0;
7625 				m_copyback(m0,
7626 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7627 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7628 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7629 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7630 			}
7631 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7632 			    sizeof(th.th_sum), &th.th_sum);
7633 
7634 			hlen += th.th_off << 2;
7635 		} else {
7636 			/*
7637 			 * TCP/IP headers are in the first mbuf; we can do
7638 			 * this the easy way.
7639 			 */
7640 			struct tcphdr *th;
7641 
7642 			if (v4) {
7643 				struct ip *ip =
7644 				    (void *)(mtod(m0, char *) + offset);
7645 				th = (void *)(mtod(m0, char *) + hlen);
7646 
7647 				ip->ip_len = 0;
7648 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7649 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7650 			} else {
7651 				struct ip6_hdr *ip6 =
7652 				    (void *)(mtod(m0, char *) + offset);
7653 				th = (void *)(mtod(m0, char *) + hlen);
7654 
7655 				ip6->ip6_plen = 0;
7656 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7657 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7658 			}
7659 			hlen += th->th_off << 2;
7660 		}
7661 
7662 		if (v4) {
7663 			WM_Q_EVCNT_INCR(txq, tso);
7664 			cmdlen |= WTX_TCPIP_CMD_IP;
7665 		} else {
7666 			WM_Q_EVCNT_INCR(txq, tso6);
7667 			ipcse = 0;
7668 		}
7669 		cmd |= WTX_TCPIP_CMD_TSE;
7670 		cmdlen |= WTX_TCPIP_CMD_TSE |
7671 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7672 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7673 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7674 	}
7675 
7676 	/*
7677 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7678 	 * offload feature, if we load the context descriptor, we
7679 	 * MUST provide valid values for IPCSS and TUCSS fields.
7680 	 */
7681 
7682 	ipcs = WTX_TCPIP_IPCSS(offset) |
7683 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7684 	    WTX_TCPIP_IPCSE(ipcse);
7685 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7686 		WM_Q_EVCNT_INCR(txq, ipsum);
7687 		fields |= WTX_IXSM;
7688 	}
7689 
7690 	offset += iphl;
7691 
7692 	if (m0->m_pkthdr.csum_flags &
7693 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7694 		WM_Q_EVCNT_INCR(txq, tusum);
7695 		fields |= WTX_TXSM;
7696 		tucs = WTX_TCPIP_TUCSS(offset) |
7697 		    WTX_TCPIP_TUCSO(offset +
7698 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7699 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7700 	} else if ((m0->m_pkthdr.csum_flags &
7701 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7702 		WM_Q_EVCNT_INCR(txq, tusum6);
7703 		fields |= WTX_TXSM;
7704 		tucs = WTX_TCPIP_TUCSS(offset) |
7705 		    WTX_TCPIP_TUCSO(offset +
7706 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7707 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7708 	} else {
7709 		/* Just initialize it to a valid TCP context. */
7710 		tucs = WTX_TCPIP_TUCSS(offset) |
7711 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7712 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
7713 	}
7714 
7715 	*cmdp = cmd;
7716 	*fieldsp = fields;
7717 
7718 	/*
7719 	 * We don't have to write context descriptor for every packet
7720 	 * except for 82574. For 82574, we must write context descriptor
7721 	 * for every packet when we use two descriptor queues.
7722 	 *
7723 	 * The 82574L can only remember the *last* context used
7724 	 * regardless of queue that it was use for.  We cannot reuse
7725 	 * contexts on this hardware platform and must generate a new
7726 	 * context every time.  82574L hardware spec, section 7.2.6,
7727 	 * second note.
7728 	 */
7729 	if (sc->sc_nqueues < 2) {
7730 		/*
7731 		 * Setting up new checksum offload context for every
7732 		 * frames takes a lot of processing time for hardware.
7733 		 * This also reduces performance a lot for small sized
7734 		 * frames so avoid it if driver can use previously
7735 		 * configured checksum offload context.
7736 		 * For TSO, in theory we can use the same TSO context only if
7737 		 * frame is the same type(IP/TCP) and the same MSS. However
7738 		 * checking whether a frame has the same IP/TCP structure is
7739 		 * hard thing so just ignore that and always restablish a
7740 		 * new TSO context.
7741 		 */
7742 		if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6))
7743 		    == 0) {
7744 			if (txq->txq_last_hw_cmd == cmd &&
7745 			    txq->txq_last_hw_fields == fields &&
7746 			    txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
7747 			    txq->txq_last_hw_tucs == (tucs & 0xffff)) {
7748 				WM_Q_EVCNT_INCR(txq, skipcontext);
7749 				return;
7750 			}
7751 		}
7752 
7753 		txq->txq_last_hw_cmd = cmd;
7754 		txq->txq_last_hw_fields = fields;
7755 		txq->txq_last_hw_ipcs = (ipcs & 0xffff);
7756 		txq->txq_last_hw_tucs = (tucs & 0xffff);
7757 	}
7758 
7759 	/* Fill in the context descriptor. */
7760 	t = (struct livengood_tcpip_ctxdesc *)
7761 	    &txq->txq_descs[txq->txq_next];
7762 	t->tcpip_ipcs = htole32(ipcs);
7763 	t->tcpip_tucs = htole32(tucs);
7764 	t->tcpip_cmdlen = htole32(cmdlen);
7765 	t->tcpip_seg = htole32(seg);
7766 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7767 
7768 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7769 	txs->txs_ndesc++;
7770 }
7771 
7772 static inline int
7773 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7774 {
7775 	struct wm_softc *sc = ifp->if_softc;
7776 	u_int cpuid = cpu_index(curcpu());
7777 
7778 	/*
7779 	 * Currently, simple distribute strategy.
7780 	 * TODO:
7781 	 * distribute by flowid(RSS has value).
7782 	 */
7783 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
7784 }
7785 
7786 static inline bool
7787 wm_linkdown_discard(struct wm_txqueue *txq)
7788 {
7789 
7790 	if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
7791 		return true;
7792 
7793 	return false;
7794 }
7795 
7796 /*
7797  * wm_start:		[ifnet interface function]
7798  *
7799  *	Start packet transmission on the interface.
7800  */
7801 static void
7802 wm_start(struct ifnet *ifp)
7803 {
7804 	struct wm_softc *sc = ifp->if_softc;
7805 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7806 
7807 #ifdef WM_MPSAFE
7808 	KASSERT(if_is_mpsafe(ifp));
7809 #endif
7810 	/*
7811 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
7812 	 */
7813 
7814 	mutex_enter(txq->txq_lock);
7815 	if (!txq->txq_stopping)
7816 		wm_start_locked(ifp);
7817 	mutex_exit(txq->txq_lock);
7818 }
7819 
7820 static void
7821 wm_start_locked(struct ifnet *ifp)
7822 {
7823 	struct wm_softc *sc = ifp->if_softc;
7824 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7825 
7826 	wm_send_common_locked(ifp, txq, false);
7827 }
7828 
7829 static int
7830 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7831 {
7832 	int qid;
7833 	struct wm_softc *sc = ifp->if_softc;
7834 	struct wm_txqueue *txq;
7835 
7836 	qid = wm_select_txqueue(ifp, m);
7837 	txq = &sc->sc_queue[qid].wmq_txq;
7838 
7839 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7840 		m_freem(m);
7841 		WM_Q_EVCNT_INCR(txq, pcqdrop);
7842 		return ENOBUFS;
7843 	}
7844 
7845 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
7846 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
7847 	if (m->m_flags & M_MCAST)
7848 		if_statinc_ref(nsr, if_omcasts);
7849 	IF_STAT_PUTREF(ifp);
7850 
7851 	if (mutex_tryenter(txq->txq_lock)) {
7852 		if (!txq->txq_stopping)
7853 			wm_transmit_locked(ifp, txq);
7854 		mutex_exit(txq->txq_lock);
7855 	}
7856 
7857 	return 0;
7858 }
7859 
7860 static void
7861 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7862 {
7863 
7864 	wm_send_common_locked(ifp, txq, true);
7865 }
7866 
7867 static void
7868 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7869     bool is_transmit)
7870 {
7871 	struct wm_softc *sc = ifp->if_softc;
7872 	struct mbuf *m0;
7873 	struct wm_txsoft *txs;
7874 	bus_dmamap_t dmamap;
7875 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7876 	bus_addr_t curaddr;
7877 	bus_size_t seglen, curlen;
7878 	uint32_t cksumcmd;
7879 	uint8_t cksumfields;
7880 	bool remap = true;
7881 
7882 	KASSERT(mutex_owned(txq->txq_lock));
7883 
7884 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7885 		return;
7886 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7887 		return;
7888 
7889 	if (__predict_false(wm_linkdown_discard(txq))) {
7890 		do {
7891 			if (is_transmit)
7892 				m0 = pcq_get(txq->txq_interq);
7893 			else
7894 				IFQ_DEQUEUE(&ifp->if_snd, m0);
7895 			/*
7896 			 * increment successed packet counter as in the case
7897 			 * which the packet is discarded by link down PHY.
7898 			 */
7899 			if (m0 != NULL)
7900 				if_statinc(ifp, if_opackets);
7901 			m_freem(m0);
7902 		} while (m0 != NULL);
7903 		return;
7904 	}
7905 
7906 	/* Remember the previous number of free descriptors. */
7907 	ofree = txq->txq_free;
7908 
7909 	/*
7910 	 * Loop through the send queue, setting up transmit descriptors
7911 	 * until we drain the queue, or use up all available transmit
7912 	 * descriptors.
7913 	 */
7914 	for (;;) {
7915 		m0 = NULL;
7916 
7917 		/* Get a work queue entry. */
7918 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7919 			wm_txeof(txq, UINT_MAX);
7920 			if (txq->txq_sfree == 0) {
7921 				DPRINTF(sc, WM_DEBUG_TX,
7922 				    ("%s: TX: no free job descriptors\n",
7923 					device_xname(sc->sc_dev)));
7924 				WM_Q_EVCNT_INCR(txq, txsstall);
7925 				break;
7926 			}
7927 		}
7928 
7929 		/* Grab a packet off the queue. */
7930 		if (is_transmit)
7931 			m0 = pcq_get(txq->txq_interq);
7932 		else
7933 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7934 		if (m0 == NULL)
7935 			break;
7936 
7937 		DPRINTF(sc, WM_DEBUG_TX,
7938 		    ("%s: TX: have packet to transmit: %p\n",
7939 			device_xname(sc->sc_dev), m0));
7940 
7941 		txs = &txq->txq_soft[txq->txq_snext];
7942 		dmamap = txs->txs_dmamap;
7943 
7944 		use_tso = (m0->m_pkthdr.csum_flags &
7945 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7946 
7947 		/*
7948 		 * So says the Linux driver:
7949 		 * The controller does a simple calculation to make sure
7950 		 * there is enough room in the FIFO before initiating the
7951 		 * DMA for each buffer. The calc is:
7952 		 *	4 = ceil(buffer len / MSS)
7953 		 * To make sure we don't overrun the FIFO, adjust the max
7954 		 * buffer len if the MSS drops.
7955 		 */
7956 		dmamap->dm_maxsegsz =
7957 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7958 		    ? m0->m_pkthdr.segsz << 2
7959 		    : WTX_MAX_LEN;
7960 
7961 		/*
7962 		 * Load the DMA map.  If this fails, the packet either
7963 		 * didn't fit in the allotted number of segments, or we
7964 		 * were short on resources.  For the too-many-segments
7965 		 * case, we simply report an error and drop the packet,
7966 		 * since we can't sanely copy a jumbo packet to a single
7967 		 * buffer.
7968 		 */
7969 retry:
7970 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7971 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7972 		if (__predict_false(error)) {
7973 			if (error == EFBIG) {
7974 				if (remap == true) {
7975 					struct mbuf *m;
7976 
7977 					remap = false;
7978 					m = m_defrag(m0, M_NOWAIT);
7979 					if (m != NULL) {
7980 						WM_Q_EVCNT_INCR(txq, defrag);
7981 						m0 = m;
7982 						goto retry;
7983 					}
7984 				}
7985 				WM_Q_EVCNT_INCR(txq, toomanyseg);
7986 				log(LOG_ERR, "%s: Tx packet consumes too many "
7987 				    "DMA segments, dropping...\n",
7988 				    device_xname(sc->sc_dev));
7989 				wm_dump_mbuf_chain(sc, m0);
7990 				m_freem(m0);
7991 				continue;
7992 			}
7993 			/* Short on resources, just stop for now. */
7994 			DPRINTF(sc, WM_DEBUG_TX,
7995 			    ("%s: TX: dmamap load failed: %d\n",
7996 				device_xname(sc->sc_dev), error));
7997 			break;
7998 		}
7999 
8000 		segs_needed = dmamap->dm_nsegs;
8001 		if (use_tso) {
8002 			/* For sentinel descriptor; see below. */
8003 			segs_needed++;
8004 		}
8005 
8006 		/*
8007 		 * Ensure we have enough descriptors free to describe
8008 		 * the packet. Note, we always reserve one descriptor
8009 		 * at the end of the ring due to the semantics of the
8010 		 * TDT register, plus one more in the event we need
8011 		 * to load offload context.
8012 		 */
8013 		if (segs_needed > txq->txq_free - 2) {
8014 			/*
8015 			 * Not enough free descriptors to transmit this
8016 			 * packet.  We haven't committed anything yet,
8017 			 * so just unload the DMA map, put the packet
8018 			 * pack on the queue, and punt. Notify the upper
8019 			 * layer that there are no more slots left.
8020 			 */
8021 			DPRINTF(sc, WM_DEBUG_TX,
8022 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8023 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8024 				segs_needed, txq->txq_free - 1));
8025 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8026 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8027 			WM_Q_EVCNT_INCR(txq, txdstall);
8028 			break;
8029 		}
8030 
8031 		/*
8032 		 * Check for 82547 Tx FIFO bug. We need to do this
8033 		 * once we know we can transmit the packet, since we
8034 		 * do some internal FIFO space accounting here.
8035 		 */
8036 		if (sc->sc_type == WM_T_82547 &&
8037 		    wm_82547_txfifo_bugchk(sc, m0)) {
8038 			DPRINTF(sc, WM_DEBUG_TX,
8039 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
8040 				device_xname(sc->sc_dev)));
8041 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8042 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8043 			WM_Q_EVCNT_INCR(txq, fifo_stall);
8044 			break;
8045 		}
8046 
8047 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8048 
8049 		DPRINTF(sc, WM_DEBUG_TX,
8050 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8051 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8052 
8053 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8054 
8055 		/*
8056 		 * Store a pointer to the packet so that we can free it
8057 		 * later.
8058 		 *
8059 		 * Initially, we consider the number of descriptors the
8060 		 * packet uses the number of DMA segments.  This may be
8061 		 * incremented by 1 if we do checksum offload (a descriptor
8062 		 * is used to set the checksum context).
8063 		 */
8064 		txs->txs_mbuf = m0;
8065 		txs->txs_firstdesc = txq->txq_next;
8066 		txs->txs_ndesc = segs_needed;
8067 
8068 		/* Set up offload parameters for this packet. */
8069 		if (m0->m_pkthdr.csum_flags &
8070 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8071 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8072 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8073 			wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
8074 		} else {
8075 			txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8076 			txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
8077 			cksumcmd = 0;
8078 			cksumfields = 0;
8079 		}
8080 
8081 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
8082 
8083 		/* Sync the DMA map. */
8084 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8085 		    BUS_DMASYNC_PREWRITE);
8086 
8087 		/* Initialize the transmit descriptor. */
8088 		for (nexttx = txq->txq_next, seg = 0;
8089 		     seg < dmamap->dm_nsegs; seg++) {
8090 			for (seglen = dmamap->dm_segs[seg].ds_len,
8091 			     curaddr = dmamap->dm_segs[seg].ds_addr;
8092 			     seglen != 0;
8093 			     curaddr += curlen, seglen -= curlen,
8094 			     nexttx = WM_NEXTTX(txq, nexttx)) {
8095 				curlen = seglen;
8096 
8097 				/*
8098 				 * So says the Linux driver:
8099 				 * Work around for premature descriptor
8100 				 * write-backs in TSO mode.  Append a
8101 				 * 4-byte sentinel descriptor.
8102 				 */
8103 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
8104 				    curlen > 8)
8105 					curlen -= 4;
8106 
8107 				wm_set_dma_addr(
8108 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
8109 				txq->txq_descs[nexttx].wtx_cmdlen
8110 				    = htole32(cksumcmd | curlen);
8111 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
8112 				    = 0;
8113 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
8114 				    = cksumfields;
8115 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8116 				lasttx = nexttx;
8117 
8118 				DPRINTF(sc, WM_DEBUG_TX,
8119 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
8120 					"len %#04zx\n",
8121 					device_xname(sc->sc_dev), nexttx,
8122 					(uint64_t)curaddr, curlen));
8123 			}
8124 		}
8125 
8126 		KASSERT(lasttx != -1);
8127 
8128 		/*
8129 		 * Set up the command byte on the last descriptor of
8130 		 * the packet. If we're in the interrupt delay window,
8131 		 * delay the interrupt.
8132 		 */
8133 		txq->txq_descs[lasttx].wtx_cmdlen |=
8134 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8135 
8136 		/*
8137 		 * If VLANs are enabled and the packet has a VLAN tag, set
8138 		 * up the descriptor to encapsulate the packet for us.
8139 		 *
8140 		 * This is only valid on the last descriptor of the packet.
8141 		 */
8142 		if (vlan_has_tag(m0)) {
8143 			txq->txq_descs[lasttx].wtx_cmdlen |=
8144 			    htole32(WTX_CMD_VLE);
8145 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
8146 			    = htole16(vlan_get_tag(m0));
8147 		}
8148 
8149 		txs->txs_lastdesc = lasttx;
8150 
8151 		DPRINTF(sc, WM_DEBUG_TX,
8152 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
8153 			device_xname(sc->sc_dev),
8154 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8155 
8156 		/* Sync the descriptors we're using. */
8157 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8158 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8159 
8160 		/* Give the packet to the chip. */
8161 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8162 
8163 		DPRINTF(sc, WM_DEBUG_TX,
8164 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8165 
8166 		DPRINTF(sc, WM_DEBUG_TX,
8167 		    ("%s: TX: finished transmitting packet, job %d\n",
8168 			device_xname(sc->sc_dev), txq->txq_snext));
8169 
8170 		/* Advance the tx pointer. */
8171 		txq->txq_free -= txs->txs_ndesc;
8172 		txq->txq_next = nexttx;
8173 
8174 		txq->txq_sfree--;
8175 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8176 
8177 		/* Pass the packet to any BPF listeners. */
8178 		bpf_mtap(ifp, m0, BPF_D_OUT);
8179 	}
8180 
8181 	if (m0 != NULL) {
8182 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8183 		WM_Q_EVCNT_INCR(txq, descdrop);
8184 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8185 			__func__));
8186 		m_freem(m0);
8187 	}
8188 
8189 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8190 		/* No more slots; notify upper layer. */
8191 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8192 	}
8193 
8194 	if (txq->txq_free != ofree) {
8195 		/* Set a watchdog timer in case the chip flakes out. */
8196 		txq->txq_lastsent = time_uptime;
8197 		txq->txq_sending = true;
8198 	}
8199 }
8200 
8201 /*
8202  * wm_nq_tx_offload:
8203  *
8204  *	Set up TCP/IP checksumming parameters for the
8205  *	specified packet, for NEWQUEUE devices
8206  */
8207 static void
8208 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8209     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
8210 {
8211 	struct mbuf *m0 = txs->txs_mbuf;
8212 	uint32_t vl_len, mssidx, cmdc;
8213 	struct ether_header *eh;
8214 	int offset, iphl;
8215 
8216 	/*
8217 	 * XXX It would be nice if the mbuf pkthdr had offset
8218 	 * fields for the protocol headers.
8219 	 */
8220 	*cmdlenp = 0;
8221 	*fieldsp = 0;
8222 
8223 	eh = mtod(m0, struct ether_header *);
8224 	switch (htons(eh->ether_type)) {
8225 	case ETHERTYPE_IP:
8226 	case ETHERTYPE_IPV6:
8227 		offset = ETHER_HDR_LEN;
8228 		break;
8229 
8230 	case ETHERTYPE_VLAN:
8231 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
8232 		break;
8233 
8234 	default:
8235 		/* Don't support this protocol or encapsulation. */
8236 		*do_csum = false;
8237 		return;
8238 	}
8239 	*do_csum = true;
8240 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
8241 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
8242 
8243 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
8244 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
8245 
8246 	if ((m0->m_pkthdr.csum_flags &
8247 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
8248 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
8249 	} else {
8250 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
8251 	}
8252 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
8253 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
8254 
8255 	if (vlan_has_tag(m0)) {
8256 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
8257 		    << NQTXC_VLLEN_VLAN_SHIFT);
8258 		*cmdlenp |= NQTX_CMD_VLE;
8259 	}
8260 
8261 	mssidx = 0;
8262 
8263 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
8264 		int hlen = offset + iphl;
8265 		int tcp_hlen;
8266 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
8267 
8268 		if (__predict_false(m0->m_len <
8269 				    (hlen + sizeof(struct tcphdr)))) {
8270 			/*
8271 			 * TCP/IP headers are not in the first mbuf; we need
8272 			 * to do this the slow and painful way. Let's just
8273 			 * hope this doesn't happen very often.
8274 			 */
8275 			struct tcphdr th;
8276 
8277 			WM_Q_EVCNT_INCR(txq, tsopain);
8278 
8279 			m_copydata(m0, hlen, sizeof(th), &th);
8280 			if (v4) {
8281 				struct ip ip;
8282 
8283 				m_copydata(m0, offset, sizeof(ip), &ip);
8284 				ip.ip_len = 0;
8285 				m_copyback(m0,
8286 				    offset + offsetof(struct ip, ip_len),
8287 				    sizeof(ip.ip_len), &ip.ip_len);
8288 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
8289 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
8290 			} else {
8291 				struct ip6_hdr ip6;
8292 
8293 				m_copydata(m0, offset, sizeof(ip6), &ip6);
8294 				ip6.ip6_plen = 0;
8295 				m_copyback(m0,
8296 				    offset + offsetof(struct ip6_hdr, ip6_plen),
8297 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
8298 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
8299 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
8300 			}
8301 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
8302 			    sizeof(th.th_sum), &th.th_sum);
8303 
8304 			tcp_hlen = th.th_off << 2;
8305 		} else {
8306 			/*
8307 			 * TCP/IP headers are in the first mbuf; we can do
8308 			 * this the easy way.
8309 			 */
8310 			struct tcphdr *th;
8311 
8312 			if (v4) {
8313 				struct ip *ip =
8314 				    (void *)(mtod(m0, char *) + offset);
8315 				th = (void *)(mtod(m0, char *) + hlen);
8316 
8317 				ip->ip_len = 0;
8318 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
8319 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
8320 			} else {
8321 				struct ip6_hdr *ip6 =
8322 				    (void *)(mtod(m0, char *) + offset);
8323 				th = (void *)(mtod(m0, char *) + hlen);
8324 
8325 				ip6->ip6_plen = 0;
8326 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
8327 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
8328 			}
8329 			tcp_hlen = th->th_off << 2;
8330 		}
8331 		hlen += tcp_hlen;
8332 		*cmdlenp |= NQTX_CMD_TSE;
8333 
8334 		if (v4) {
8335 			WM_Q_EVCNT_INCR(txq, tso);
8336 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
8337 		} else {
8338 			WM_Q_EVCNT_INCR(txq, tso6);
8339 			*fieldsp |= NQTXD_FIELDS_TUXSM;
8340 		}
8341 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
8342 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8343 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
8344 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
8345 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
8346 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
8347 	} else {
8348 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
8349 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
8350 	}
8351 
8352 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
8353 		*fieldsp |= NQTXD_FIELDS_IXSM;
8354 		cmdc |= NQTXC_CMD_IP4;
8355 	}
8356 
8357 	if (m0->m_pkthdr.csum_flags &
8358 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
8359 		WM_Q_EVCNT_INCR(txq, tusum);
8360 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
8361 			cmdc |= NQTXC_CMD_TCP;
8362 		else
8363 			cmdc |= NQTXC_CMD_UDP;
8364 
8365 		cmdc |= NQTXC_CMD_IP4;
8366 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8367 	}
8368 	if (m0->m_pkthdr.csum_flags &
8369 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
8370 		WM_Q_EVCNT_INCR(txq, tusum6);
8371 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
8372 			cmdc |= NQTXC_CMD_TCP;
8373 		else
8374 			cmdc |= NQTXC_CMD_UDP;
8375 
8376 		cmdc |= NQTXC_CMD_IP6;
8377 		*fieldsp |= NQTXD_FIELDS_TUXSM;
8378 	}
8379 
8380 	/*
8381 	 * We don't have to write context descriptor for every packet to
8382 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
8383 	 * I210 and I211. It is enough to write once per a Tx queue for these
8384 	 * controllers.
8385 	 * It would be overhead to write context descriptor for every packet,
8386 	 * however it does not cause problems.
8387 	 */
8388 	/* Fill in the context descriptor. */
8389 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
8390 	    htole32(vl_len);
8391 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
8392 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
8393 	    htole32(cmdc);
8394 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
8395 	    htole32(mssidx);
8396 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8397 	DPRINTF(sc, WM_DEBUG_TX,
8398 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
8399 		txq->txq_next, 0, vl_len));
8400 	DPRINTF(sc, WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
8401 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
8402 	txs->txs_ndesc++;
8403 }
8404 
8405 /*
8406  * wm_nq_start:		[ifnet interface function]
8407  *
8408  *	Start packet transmission on the interface for NEWQUEUE devices
8409  */
8410 static void
8411 wm_nq_start(struct ifnet *ifp)
8412 {
8413 	struct wm_softc *sc = ifp->if_softc;
8414 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8415 
8416 #ifdef WM_MPSAFE
8417 	KASSERT(if_is_mpsafe(ifp));
8418 #endif
8419 	/*
8420 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
8421 	 */
8422 
8423 	mutex_enter(txq->txq_lock);
8424 	if (!txq->txq_stopping)
8425 		wm_nq_start_locked(ifp);
8426 	mutex_exit(txq->txq_lock);
8427 }
8428 
8429 static void
8430 wm_nq_start_locked(struct ifnet *ifp)
8431 {
8432 	struct wm_softc *sc = ifp->if_softc;
8433 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8434 
8435 	wm_nq_send_common_locked(ifp, txq, false);
8436 }
8437 
8438 static int
8439 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
8440 {
8441 	int qid;
8442 	struct wm_softc *sc = ifp->if_softc;
8443 	struct wm_txqueue *txq;
8444 
8445 	qid = wm_select_txqueue(ifp, m);
8446 	txq = &sc->sc_queue[qid].wmq_txq;
8447 
8448 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8449 		m_freem(m);
8450 		WM_Q_EVCNT_INCR(txq, pcqdrop);
8451 		return ENOBUFS;
8452 	}
8453 
8454 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
8455 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
8456 	if (m->m_flags & M_MCAST)
8457 		if_statinc_ref(nsr, if_omcasts);
8458 	IF_STAT_PUTREF(ifp);
8459 
8460 	/*
8461 	 * The situations which this mutex_tryenter() fails at running time
8462 	 * are below two patterns.
8463 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
8464 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
8465 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
8466 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
8467 	 * In the case of (2), the last packet enqueued to txq->txq_interq is
8468 	 * also dequeued by wm_deferred_start_locked(). So, it does not get
8469 	 * stuck, either.
8470 	 */
8471 	if (mutex_tryenter(txq->txq_lock)) {
8472 		if (!txq->txq_stopping)
8473 			wm_nq_transmit_locked(ifp, txq);
8474 		mutex_exit(txq->txq_lock);
8475 	}
8476 
8477 	return 0;
8478 }
8479 
8480 static void
8481 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8482 {
8483 
8484 	wm_nq_send_common_locked(ifp, txq, true);
8485 }
8486 
8487 static void
8488 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8489     bool is_transmit)
8490 {
8491 	struct wm_softc *sc = ifp->if_softc;
8492 	struct mbuf *m0;
8493 	struct wm_txsoft *txs;
8494 	bus_dmamap_t dmamap;
8495 	int error, nexttx, lasttx = -1, seg, segs_needed;
8496 	bool do_csum, sent;
8497 	bool remap = true;
8498 
8499 	KASSERT(mutex_owned(txq->txq_lock));
8500 
8501 	if ((ifp->if_flags & IFF_RUNNING) == 0)
8502 		return;
8503 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8504 		return;
8505 
8506 	if (__predict_false(wm_linkdown_discard(txq))) {
8507 		do {
8508 			if (is_transmit)
8509 				m0 = pcq_get(txq->txq_interq);
8510 			else
8511 				IFQ_DEQUEUE(&ifp->if_snd, m0);
8512 			/*
8513 			 * increment successed packet counter as in the case
8514 			 * which the packet is discarded by link down PHY.
8515 			 */
8516 			if (m0 != NULL)
8517 				if_statinc(ifp, if_opackets);
8518 			m_freem(m0);
8519 		} while (m0 != NULL);
8520 		return;
8521 	}
8522 
8523 	sent = false;
8524 
8525 	/*
8526 	 * Loop through the send queue, setting up transmit descriptors
8527 	 * until we drain the queue, or use up all available transmit
8528 	 * descriptors.
8529 	 */
8530 	for (;;) {
8531 		m0 = NULL;
8532 
8533 		/* Get a work queue entry. */
8534 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8535 			wm_txeof(txq, UINT_MAX);
8536 			if (txq->txq_sfree == 0) {
8537 				DPRINTF(sc, WM_DEBUG_TX,
8538 				    ("%s: TX: no free job descriptors\n",
8539 					device_xname(sc->sc_dev)));
8540 				WM_Q_EVCNT_INCR(txq, txsstall);
8541 				break;
8542 			}
8543 		}
8544 
8545 		/* Grab a packet off the queue. */
8546 		if (is_transmit)
8547 			m0 = pcq_get(txq->txq_interq);
8548 		else
8549 			IFQ_DEQUEUE(&ifp->if_snd, m0);
8550 		if (m0 == NULL)
8551 			break;
8552 
8553 		DPRINTF(sc, WM_DEBUG_TX,
8554 		    ("%s: TX: have packet to transmit: %p\n",
8555 		    device_xname(sc->sc_dev), m0));
8556 
8557 		txs = &txq->txq_soft[txq->txq_snext];
8558 		dmamap = txs->txs_dmamap;
8559 
8560 		/*
8561 		 * Load the DMA map.  If this fails, the packet either
8562 		 * didn't fit in the allotted number of segments, or we
8563 		 * were short on resources.  For the too-many-segments
8564 		 * case, we simply report an error and drop the packet,
8565 		 * since we can't sanely copy a jumbo packet to a single
8566 		 * buffer.
8567 		 */
8568 retry:
8569 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
8570 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
8571 		if (__predict_false(error)) {
8572 			if (error == EFBIG) {
8573 				if (remap == true) {
8574 					struct mbuf *m;
8575 
8576 					remap = false;
8577 					m = m_defrag(m0, M_NOWAIT);
8578 					if (m != NULL) {
8579 						WM_Q_EVCNT_INCR(txq, defrag);
8580 						m0 = m;
8581 						goto retry;
8582 					}
8583 				}
8584 				WM_Q_EVCNT_INCR(txq, toomanyseg);
8585 				log(LOG_ERR, "%s: Tx packet consumes too many "
8586 				    "DMA segments, dropping...\n",
8587 				    device_xname(sc->sc_dev));
8588 				wm_dump_mbuf_chain(sc, m0);
8589 				m_freem(m0);
8590 				continue;
8591 			}
8592 			/* Short on resources, just stop for now. */
8593 			DPRINTF(sc, WM_DEBUG_TX,
8594 			    ("%s: TX: dmamap load failed: %d\n",
8595 				device_xname(sc->sc_dev), error));
8596 			break;
8597 		}
8598 
8599 		segs_needed = dmamap->dm_nsegs;
8600 
8601 		/*
8602 		 * Ensure we have enough descriptors free to describe
8603 		 * the packet. Note, we always reserve one descriptor
8604 		 * at the end of the ring due to the semantics of the
8605 		 * TDT register, plus one more in the event we need
8606 		 * to load offload context.
8607 		 */
8608 		if (segs_needed > txq->txq_free - 2) {
8609 			/*
8610 			 * Not enough free descriptors to transmit this
8611 			 * packet.  We haven't committed anything yet,
8612 			 * so just unload the DMA map, put the packet
8613 			 * pack on the queue, and punt. Notify the upper
8614 			 * layer that there are no more slots left.
8615 			 */
8616 			DPRINTF(sc, WM_DEBUG_TX,
8617 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
8618 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
8619 				segs_needed, txq->txq_free - 1));
8620 			txq->txq_flags |= WM_TXQ_NO_SPACE;
8621 			bus_dmamap_unload(sc->sc_dmat, dmamap);
8622 			WM_Q_EVCNT_INCR(txq, txdstall);
8623 			break;
8624 		}
8625 
8626 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
8627 
8628 		DPRINTF(sc, WM_DEBUG_TX,
8629 		    ("%s: TX: packet has %d (%d) DMA segments\n",
8630 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
8631 
8632 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
8633 
8634 		/*
8635 		 * Store a pointer to the packet so that we can free it
8636 		 * later.
8637 		 *
8638 		 * Initially, we consider the number of descriptors the
8639 		 * packet uses the number of DMA segments.  This may be
8640 		 * incremented by 1 if we do checksum offload (a descriptor
8641 		 * is used to set the checksum context).
8642 		 */
8643 		txs->txs_mbuf = m0;
8644 		txs->txs_firstdesc = txq->txq_next;
8645 		txs->txs_ndesc = segs_needed;
8646 
8647 		/* Set up offload parameters for this packet. */
8648 		uint32_t cmdlen, fields, dcmdlen;
8649 		if (m0->m_pkthdr.csum_flags &
8650 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
8651 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8652 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
8653 			wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
8654 			    &do_csum);
8655 		} else {
8656 			do_csum = false;
8657 			cmdlen = 0;
8658 			fields = 0;
8659 		}
8660 
8661 		/* Sync the DMA map. */
8662 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
8663 		    BUS_DMASYNC_PREWRITE);
8664 
8665 		/* Initialize the first transmit descriptor. */
8666 		nexttx = txq->txq_next;
8667 		if (!do_csum) {
8668 			/* Setup a legacy descriptor */
8669 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
8670 			    dmamap->dm_segs[0].ds_addr);
8671 			txq->txq_descs[nexttx].wtx_cmdlen =
8672 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
8673 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
8674 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
8675 			if (vlan_has_tag(m0)) {
8676 				txq->txq_descs[nexttx].wtx_cmdlen |=
8677 				    htole32(WTX_CMD_VLE);
8678 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
8679 				    htole16(vlan_get_tag(m0));
8680 			} else
8681 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
8682 
8683 			dcmdlen = 0;
8684 		} else {
8685 			/* Setup an advanced data descriptor */
8686 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8687 			    htole64(dmamap->dm_segs[0].ds_addr);
8688 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
8689 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8690 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
8691 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
8692 			    htole32(fields);
8693 			DPRINTF(sc, WM_DEBUG_TX,
8694 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
8695 				device_xname(sc->sc_dev), nexttx,
8696 				(uint64_t)dmamap->dm_segs[0].ds_addr));
8697 			DPRINTF(sc, WM_DEBUG_TX,
8698 			    ("\t 0x%08x%08x\n", fields,
8699 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
8700 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
8701 		}
8702 
8703 		lasttx = nexttx;
8704 		nexttx = WM_NEXTTX(txq, nexttx);
8705 		/*
8706 		 * Fill in the next descriptors. legacy or advanced format
8707 		 * is the same here
8708 		 */
8709 		for (seg = 1; seg < dmamap->dm_nsegs;
8710 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
8711 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
8712 			    htole64(dmamap->dm_segs[seg].ds_addr);
8713 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
8714 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
8715 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
8716 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
8717 			lasttx = nexttx;
8718 
8719 			DPRINTF(sc, WM_DEBUG_TX,
8720 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
8721 				device_xname(sc->sc_dev), nexttx,
8722 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
8723 				dmamap->dm_segs[seg].ds_len));
8724 		}
8725 
8726 		KASSERT(lasttx != -1);
8727 
8728 		/*
8729 		 * Set up the command byte on the last descriptor of
8730 		 * the packet. If we're in the interrupt delay window,
8731 		 * delay the interrupt.
8732 		 */
8733 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
8734 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
8735 		txq->txq_descs[lasttx].wtx_cmdlen |=
8736 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
8737 
8738 		txs->txs_lastdesc = lasttx;
8739 
8740 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
8741 		    device_xname(sc->sc_dev),
8742 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
8743 
8744 		/* Sync the descriptors we're using. */
8745 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8746 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8747 
8748 		/* Give the packet to the chip. */
8749 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8750 		sent = true;
8751 
8752 		DPRINTF(sc, WM_DEBUG_TX,
8753 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8754 
8755 		DPRINTF(sc, WM_DEBUG_TX,
8756 		    ("%s: TX: finished transmitting packet, job %d\n",
8757 			device_xname(sc->sc_dev), txq->txq_snext));
8758 
8759 		/* Advance the tx pointer. */
8760 		txq->txq_free -= txs->txs_ndesc;
8761 		txq->txq_next = nexttx;
8762 
8763 		txq->txq_sfree--;
8764 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8765 
8766 		/* Pass the packet to any BPF listeners. */
8767 		bpf_mtap(ifp, m0, BPF_D_OUT);
8768 	}
8769 
8770 	if (m0 != NULL) {
8771 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8772 		WM_Q_EVCNT_INCR(txq, descdrop);
8773 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8774 			__func__));
8775 		m_freem(m0);
8776 	}
8777 
8778 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8779 		/* No more slots; notify upper layer. */
8780 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8781 	}
8782 
8783 	if (sent) {
8784 		/* Set a watchdog timer in case the chip flakes out. */
8785 		txq->txq_lastsent = time_uptime;
8786 		txq->txq_sending = true;
8787 	}
8788 }
8789 
8790 static void
8791 wm_deferred_start_locked(struct wm_txqueue *txq)
8792 {
8793 	struct wm_softc *sc = txq->txq_sc;
8794 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8795 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8796 	int qid = wmq->wmq_id;
8797 
8798 	KASSERT(mutex_owned(txq->txq_lock));
8799 
8800 	if (txq->txq_stopping) {
8801 		mutex_exit(txq->txq_lock);
8802 		return;
8803 	}
8804 
8805 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
8806 		/* XXX need for ALTQ or one CPU system */
8807 		if (qid == 0)
8808 			wm_nq_start_locked(ifp);
8809 		wm_nq_transmit_locked(ifp, txq);
8810 	} else {
8811 		/* XXX need for ALTQ or one CPU system */
8812 		if (qid == 0)
8813 			wm_start_locked(ifp);
8814 		wm_transmit_locked(ifp, txq);
8815 	}
8816 }
8817 
8818 /* Interrupt */
8819 
8820 /*
8821  * wm_txeof:
8822  *
8823  *	Helper; handle transmit interrupts.
8824  */
8825 static bool
8826 wm_txeof(struct wm_txqueue *txq, u_int limit)
8827 {
8828 	struct wm_softc *sc = txq->txq_sc;
8829 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8830 	struct wm_txsoft *txs;
8831 	int count = 0;
8832 	int i;
8833 	uint8_t status;
8834 	bool more = false;
8835 
8836 	KASSERT(mutex_owned(txq->txq_lock));
8837 
8838 	if (txq->txq_stopping)
8839 		return false;
8840 
8841 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
8842 
8843 	/*
8844 	 * Go through the Tx list and free mbufs for those
8845 	 * frames which have been transmitted.
8846 	 */
8847 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8848 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8849 		if (limit-- == 0) {
8850 			more = true;
8851 			DPRINTF(sc, WM_DEBUG_TX,
8852 			    ("%s: TX: loop limited, job %d is not processed\n",
8853 				device_xname(sc->sc_dev), i));
8854 			break;
8855 		}
8856 
8857 		txs = &txq->txq_soft[i];
8858 
8859 		DPRINTF(sc, WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8860 			device_xname(sc->sc_dev), i));
8861 
8862 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8863 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8864 
8865 		status =
8866 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
8867 		if ((status & WTX_ST_DD) == 0) {
8868 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8869 			    BUS_DMASYNC_PREREAD);
8870 			break;
8871 		}
8872 
8873 		count++;
8874 		DPRINTF(sc, WM_DEBUG_TX,
8875 		    ("%s: TX: job %d done: descs %d..%d\n",
8876 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8877 		    txs->txs_lastdesc));
8878 
8879 		/*
8880 		 * XXX We should probably be using the statistics
8881 		 * XXX registers, but I don't know if they exist
8882 		 * XXX on chips before the i82544.
8883 		 */
8884 
8885 #ifdef WM_EVENT_COUNTERS
8886 		if (status & WTX_ST_TU)
8887 			WM_Q_EVCNT_INCR(txq, underrun);
8888 #endif /* WM_EVENT_COUNTERS */
8889 
8890 		/*
8891 		 * 82574 and newer's document says the status field has neither
8892 		 * EC (Excessive Collision) bit nor LC (Late Collision) bit
8893 		 * (reserved). Refer "PCIe GbE Controller Open Source Software
8894 		 * Developer's Manual", 82574 datasheet and newer.
8895 		 *
8896 		 * XXX I saw the LC bit was set on I218 even though the media
8897 		 * was full duplex, so the bit might be used for other
8898 		 * meaning ...(I have no document).
8899 		 */
8900 
8901 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
8902 		    && ((sc->sc_type < WM_T_82574)
8903 			|| (sc->sc_type == WM_T_80003))) {
8904 			if_statinc(ifp, if_oerrors);
8905 			if (status & WTX_ST_LC)
8906 				log(LOG_WARNING, "%s: late collision\n",
8907 				    device_xname(sc->sc_dev));
8908 			else if (status & WTX_ST_EC) {
8909 				if_statadd(ifp, if_collisions,
8910 				    TX_COLLISION_THRESHOLD + 1);
8911 				log(LOG_WARNING, "%s: excessive collisions\n",
8912 				    device_xname(sc->sc_dev));
8913 			}
8914 		} else
8915 			if_statinc(ifp, if_opackets);
8916 
8917 		txq->txq_packets++;
8918 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8919 
8920 		txq->txq_free += txs->txs_ndesc;
8921 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8922 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8923 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8924 		m_freem(txs->txs_mbuf);
8925 		txs->txs_mbuf = NULL;
8926 	}
8927 
8928 	/* Update the dirty transmit buffer pointer. */
8929 	txq->txq_sdirty = i;
8930 	DPRINTF(sc, WM_DEBUG_TX,
8931 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8932 
8933 	if (count != 0)
8934 		rnd_add_uint32(&sc->rnd_source, count);
8935 
8936 	/*
8937 	 * If there are no more pending transmissions, cancel the watchdog
8938 	 * timer.
8939 	 */
8940 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8941 		txq->txq_sending = false;
8942 
8943 	return more;
8944 }
8945 
8946 static inline uint32_t
8947 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8948 {
8949 	struct wm_softc *sc = rxq->rxq_sc;
8950 
8951 	if (sc->sc_type == WM_T_82574)
8952 		return EXTRXC_STATUS(
8953 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
8954 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8955 		return NQRXC_STATUS(
8956 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
8957 	else
8958 		return rxq->rxq_descs[idx].wrx_status;
8959 }
8960 
8961 static inline uint32_t
8962 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8963 {
8964 	struct wm_softc *sc = rxq->rxq_sc;
8965 
8966 	if (sc->sc_type == WM_T_82574)
8967 		return EXTRXC_ERROR(
8968 		    le32toh(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat));
8969 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8970 		return NQRXC_ERROR(
8971 		    le32toh(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat));
8972 	else
8973 		return rxq->rxq_descs[idx].wrx_errors;
8974 }
8975 
8976 static inline uint16_t
8977 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8978 {
8979 	struct wm_softc *sc = rxq->rxq_sc;
8980 
8981 	if (sc->sc_type == WM_T_82574)
8982 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8983 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8984 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8985 	else
8986 		return rxq->rxq_descs[idx].wrx_special;
8987 }
8988 
8989 static inline int
8990 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8991 {
8992 	struct wm_softc *sc = rxq->rxq_sc;
8993 
8994 	if (sc->sc_type == WM_T_82574)
8995 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8996 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8997 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8998 	else
8999 		return rxq->rxq_descs[idx].wrx_len;
9000 }
9001 
9002 #ifdef WM_DEBUG
9003 static inline uint32_t
9004 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
9005 {
9006 	struct wm_softc *sc = rxq->rxq_sc;
9007 
9008 	if (sc->sc_type == WM_T_82574)
9009 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
9010 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9011 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
9012 	else
9013 		return 0;
9014 }
9015 
9016 static inline uint8_t
9017 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
9018 {
9019 	struct wm_softc *sc = rxq->rxq_sc;
9020 
9021 	if (sc->sc_type == WM_T_82574)
9022 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
9023 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9024 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
9025 	else
9026 		return 0;
9027 }
9028 #endif /* WM_DEBUG */
9029 
9030 static inline bool
9031 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
9032     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9033 {
9034 
9035 	if (sc->sc_type == WM_T_82574)
9036 		return (status & ext_bit) != 0;
9037 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9038 		return (status & nq_bit) != 0;
9039 	else
9040 		return (status & legacy_bit) != 0;
9041 }
9042 
9043 static inline bool
9044 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
9045     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
9046 {
9047 
9048 	if (sc->sc_type == WM_T_82574)
9049 		return (error & ext_bit) != 0;
9050 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
9051 		return (error & nq_bit) != 0;
9052 	else
9053 		return (error & legacy_bit) != 0;
9054 }
9055 
9056 static inline bool
9057 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
9058 {
9059 
9060 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9061 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
9062 		return true;
9063 	else
9064 		return false;
9065 }
9066 
9067 static inline bool
9068 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
9069 {
9070 	struct wm_softc *sc = rxq->rxq_sc;
9071 
9072 	/* XXX missing error bit for newqueue? */
9073 	if (wm_rxdesc_is_set_error(sc, errors,
9074 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
9075 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
9076 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
9077 		NQRXC_ERROR_RXE)) {
9078 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
9079 		    EXTRXC_ERROR_SE, 0))
9080 			log(LOG_WARNING, "%s: symbol error\n",
9081 			    device_xname(sc->sc_dev));
9082 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
9083 		    EXTRXC_ERROR_SEQ, 0))
9084 			log(LOG_WARNING, "%s: receive sequence error\n",
9085 			    device_xname(sc->sc_dev));
9086 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
9087 		    EXTRXC_ERROR_CE, 0))
9088 			log(LOG_WARNING, "%s: CRC error\n",
9089 			    device_xname(sc->sc_dev));
9090 		return true;
9091 	}
9092 
9093 	return false;
9094 }
9095 
9096 static inline bool
9097 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
9098 {
9099 	struct wm_softc *sc = rxq->rxq_sc;
9100 
9101 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
9102 		NQRXC_STATUS_DD)) {
9103 		/* We have processed all of the receive descriptors. */
9104 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
9105 		return false;
9106 	}
9107 
9108 	return true;
9109 }
9110 
9111 static inline bool
9112 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
9113     uint16_t vlantag, struct mbuf *m)
9114 {
9115 
9116 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
9117 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
9118 		vlan_set_tag(m, le16toh(vlantag));
9119 	}
9120 
9121 	return true;
9122 }
9123 
9124 static inline void
9125 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
9126     uint32_t errors, struct mbuf *m)
9127 {
9128 	struct wm_softc *sc = rxq->rxq_sc;
9129 
9130 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
9131 		if (wm_rxdesc_is_set_status(sc, status,
9132 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
9133 			WM_Q_EVCNT_INCR(rxq, ipsum);
9134 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
9135 			if (wm_rxdesc_is_set_error(sc, errors,
9136 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
9137 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
9138 		}
9139 		if (wm_rxdesc_is_set_status(sc, status,
9140 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
9141 			/*
9142 			 * Note: we don't know if this was TCP or UDP,
9143 			 * so we just set both bits, and expect the
9144 			 * upper layers to deal.
9145 			 */
9146 			WM_Q_EVCNT_INCR(rxq, tusum);
9147 			m->m_pkthdr.csum_flags |=
9148 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
9149 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
9150 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
9151 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
9152 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
9153 		}
9154 	}
9155 }
9156 
9157 /*
9158  * wm_rxeof:
9159  *
9160  *	Helper; handle receive interrupts.
9161  */
9162 static bool
9163 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
9164 {
9165 	struct wm_softc *sc = rxq->rxq_sc;
9166 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9167 	struct wm_rxsoft *rxs;
9168 	struct mbuf *m;
9169 	int i, len;
9170 	int count = 0;
9171 	uint32_t status, errors;
9172 	uint16_t vlantag;
9173 	bool more = false;
9174 
9175 	KASSERT(mutex_owned(rxq->rxq_lock));
9176 
9177 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
9178 		if (limit-- == 0) {
9179 			more = true;
9180 			DPRINTF(sc, WM_DEBUG_RX,
9181 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
9182 				device_xname(sc->sc_dev), i));
9183 			break;
9184 		}
9185 
9186 		rxs = &rxq->rxq_soft[i];
9187 
9188 		DPRINTF(sc, WM_DEBUG_RX,
9189 		    ("%s: RX: checking descriptor %d\n",
9190 			device_xname(sc->sc_dev), i));
9191 		wm_cdrxsync(rxq, i,
9192 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
9193 
9194 		status = wm_rxdesc_get_status(rxq, i);
9195 		errors = wm_rxdesc_get_errors(rxq, i);
9196 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
9197 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
9198 #ifdef WM_DEBUG
9199 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
9200 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
9201 #endif
9202 
9203 		if (!wm_rxdesc_dd(rxq, i, status)) {
9204 			break;
9205 		}
9206 
9207 		count++;
9208 		if (__predict_false(rxq->rxq_discard)) {
9209 			DPRINTF(sc, WM_DEBUG_RX,
9210 			    ("%s: RX: discarding contents of descriptor %d\n",
9211 				device_xname(sc->sc_dev), i));
9212 			wm_init_rxdesc(rxq, i);
9213 			if (wm_rxdesc_is_eop(rxq, status)) {
9214 				/* Reset our state. */
9215 				DPRINTF(sc, WM_DEBUG_RX,
9216 				    ("%s: RX: resetting rxdiscard -> 0\n",
9217 					device_xname(sc->sc_dev)));
9218 				rxq->rxq_discard = 0;
9219 			}
9220 			continue;
9221 		}
9222 
9223 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9224 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
9225 
9226 		m = rxs->rxs_mbuf;
9227 
9228 		/*
9229 		 * Add a new receive buffer to the ring, unless of
9230 		 * course the length is zero. Treat the latter as a
9231 		 * failed mapping.
9232 		 */
9233 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
9234 			/*
9235 			 * Failed, throw away what we've done so
9236 			 * far, and discard the rest of the packet.
9237 			 */
9238 			if_statinc(ifp, if_ierrors);
9239 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
9240 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
9241 			wm_init_rxdesc(rxq, i);
9242 			if (!wm_rxdesc_is_eop(rxq, status))
9243 				rxq->rxq_discard = 1;
9244 			if (rxq->rxq_head != NULL)
9245 				m_freem(rxq->rxq_head);
9246 			WM_RXCHAIN_RESET(rxq);
9247 			DPRINTF(sc, WM_DEBUG_RX,
9248 			    ("%s: RX: Rx buffer allocation failed, "
9249 			    "dropping packet%s\n", device_xname(sc->sc_dev),
9250 				rxq->rxq_discard ? " (discard)" : ""));
9251 			continue;
9252 		}
9253 
9254 		m->m_len = len;
9255 		rxq->rxq_len += len;
9256 		DPRINTF(sc, WM_DEBUG_RX,
9257 		    ("%s: RX: buffer at %p len %d\n",
9258 			device_xname(sc->sc_dev), m->m_data, len));
9259 
9260 		/* If this is not the end of the packet, keep looking. */
9261 		if (!wm_rxdesc_is_eop(rxq, status)) {
9262 			WM_RXCHAIN_LINK(rxq, m);
9263 			DPRINTF(sc, WM_DEBUG_RX,
9264 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
9265 				device_xname(sc->sc_dev), rxq->rxq_len));
9266 			continue;
9267 		}
9268 
9269 		/*
9270 		 * Okay, we have the entire packet now. The chip is
9271 		 * configured to include the FCS except I35[04], I21[01].
9272 		 * (not all chips can be configured to strip it), so we need
9273 		 * to trim it. Those chips have an eratta, the RCTL_SECRC bit
9274 		 * in RCTL register is always set, so we don't trim it.
9275 		 * PCH2 and newer chip also not include FCS when jumbo
9276 		 * frame is used to do workaround an errata.
9277 		 * May need to adjust length of previous mbuf in the
9278 		 * chain if the current mbuf is too short.
9279 		 */
9280 		if ((sc->sc_flags & WM_F_CRC_STRIP) == 0) {
9281 			if (m->m_len < ETHER_CRC_LEN) {
9282 				rxq->rxq_tail->m_len
9283 				    -= (ETHER_CRC_LEN - m->m_len);
9284 				m->m_len = 0;
9285 			} else
9286 				m->m_len -= ETHER_CRC_LEN;
9287 			len = rxq->rxq_len - ETHER_CRC_LEN;
9288 		} else
9289 			len = rxq->rxq_len;
9290 
9291 		WM_RXCHAIN_LINK(rxq, m);
9292 
9293 		*rxq->rxq_tailp = NULL;
9294 		m = rxq->rxq_head;
9295 
9296 		WM_RXCHAIN_RESET(rxq);
9297 
9298 		DPRINTF(sc, WM_DEBUG_RX,
9299 		    ("%s: RX: have entire packet, len -> %d\n",
9300 			device_xname(sc->sc_dev), len));
9301 
9302 		/* If an error occurred, update stats and drop the packet. */
9303 		if (wm_rxdesc_has_errors(rxq, errors)) {
9304 			m_freem(m);
9305 			continue;
9306 		}
9307 
9308 		/* No errors.  Receive the packet. */
9309 		m_set_rcvif(m, ifp);
9310 		m->m_pkthdr.len = len;
9311 		/*
9312 		 * TODO
9313 		 * should be save rsshash and rsstype to this mbuf.
9314 		 */
9315 		DPRINTF(sc, WM_DEBUG_RX,
9316 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
9317 			device_xname(sc->sc_dev), rsstype, rsshash));
9318 
9319 		/*
9320 		 * If VLANs are enabled, VLAN packets have been unwrapped
9321 		 * for us.  Associate the tag with the packet.
9322 		 */
9323 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
9324 			continue;
9325 
9326 		/* Set up checksum info for this packet. */
9327 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
9328 
9329 		rxq->rxq_packets++;
9330 		rxq->rxq_bytes += len;
9331 		/* Pass it on. */
9332 		if_percpuq_enqueue(sc->sc_ipq, m);
9333 
9334 		if (rxq->rxq_stopping)
9335 			break;
9336 	}
9337 	rxq->rxq_ptr = i;
9338 
9339 	if (count != 0)
9340 		rnd_add_uint32(&sc->rnd_source, count);
9341 
9342 	DPRINTF(sc, WM_DEBUG_RX,
9343 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
9344 
9345 	return more;
9346 }
9347 
9348 /*
9349  * wm_linkintr_gmii:
9350  *
9351  *	Helper; handle link interrupts for GMII.
9352  */
9353 static void
9354 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
9355 {
9356 	device_t dev = sc->sc_dev;
9357 	uint32_t status, reg;
9358 	bool link;
9359 	int rv;
9360 
9361 	KASSERT(WM_CORE_LOCKED(sc));
9362 
9363 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
9364 		__func__));
9365 
9366 	if ((icr & ICR_LSC) == 0) {
9367 		if (icr & ICR_RXSEQ)
9368 			DPRINTF(sc, WM_DEBUG_LINK,
9369 			    ("%s: LINK Receive sequence error\n",
9370 				device_xname(dev)));
9371 		return;
9372 	}
9373 
9374 	/* Link status changed */
9375 	status = CSR_READ(sc, WMREG_STATUS);
9376 	link = status & STATUS_LU;
9377 	if (link) {
9378 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9379 			device_xname(dev),
9380 			(status & STATUS_FD) ? "FDX" : "HDX"));
9381 		if (wm_phy_need_linkdown_discard(sc))
9382 			wm_clear_linkdown_discard(sc);
9383 	} else {
9384 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9385 			device_xname(dev)));
9386 		if (wm_phy_need_linkdown_discard(sc))
9387 			wm_set_linkdown_discard(sc);
9388 	}
9389 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
9390 		wm_gig_downshift_workaround_ich8lan(sc);
9391 
9392 	if ((sc->sc_type == WM_T_ICH8)
9393 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
9394 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
9395 	}
9396 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
9397 		device_xname(dev)));
9398 	mii_pollstat(&sc->sc_mii);
9399 	if (sc->sc_type == WM_T_82543) {
9400 		int miistatus, active;
9401 
9402 		/*
9403 		 * With 82543, we need to force speed and
9404 		 * duplex on the MAC equal to what the PHY
9405 		 * speed and duplex configuration is.
9406 		 */
9407 		miistatus = sc->sc_mii.mii_media_status;
9408 
9409 		if (miistatus & IFM_ACTIVE) {
9410 			active = sc->sc_mii.mii_media_active;
9411 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9412 			switch (IFM_SUBTYPE(active)) {
9413 			case IFM_10_T:
9414 				sc->sc_ctrl |= CTRL_SPEED_10;
9415 				break;
9416 			case IFM_100_TX:
9417 				sc->sc_ctrl |= CTRL_SPEED_100;
9418 				break;
9419 			case IFM_1000_T:
9420 				sc->sc_ctrl |= CTRL_SPEED_1000;
9421 				break;
9422 			default:
9423 				/*
9424 				 * Fiber?
9425 				 * Shoud not enter here.
9426 				 */
9427 				device_printf(dev, "unknown media (%x)\n",
9428 				    active);
9429 				break;
9430 			}
9431 			if (active & IFM_FDX)
9432 				sc->sc_ctrl |= CTRL_FD;
9433 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9434 		}
9435 	} else if (sc->sc_type == WM_T_PCH) {
9436 		wm_k1_gig_workaround_hv(sc,
9437 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9438 	}
9439 
9440 	/*
9441 	 * When connected at 10Mbps half-duplex, some parts are excessively
9442 	 * aggressive resulting in many collisions. To avoid this, increase
9443 	 * the IPG and reduce Rx latency in the PHY.
9444 	 */
9445 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
9446 	    && link) {
9447 		uint32_t tipg_reg;
9448 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
9449 		bool fdx;
9450 		uint16_t emi_addr, emi_val;
9451 
9452 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
9453 		tipg_reg &= ~TIPG_IPGT_MASK;
9454 		fdx = status & STATUS_FD;
9455 
9456 		if (!fdx && (speed == STATUS_SPEED_10)) {
9457 			tipg_reg |= 0xff;
9458 			/* Reduce Rx latency in analog PHY */
9459 			emi_val = 0;
9460 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
9461 		    fdx && speed != STATUS_SPEED_1000) {
9462 			tipg_reg |= 0xc;
9463 			emi_val = 1;
9464 		} else {
9465 			/* Roll back the default values */
9466 			tipg_reg |= 0x08;
9467 			emi_val = 1;
9468 		}
9469 
9470 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
9471 
9472 		rv = sc->phy.acquire(sc);
9473 		if (rv)
9474 			return;
9475 
9476 		if (sc->sc_type == WM_T_PCH2)
9477 			emi_addr = I82579_RX_CONFIG;
9478 		else
9479 			emi_addr = I217_RX_CONFIG;
9480 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
9481 
9482 		if (sc->sc_type >= WM_T_PCH_LPT) {
9483 			uint16_t phy_reg;
9484 
9485 			sc->phy.readreg_locked(dev, 2,
9486 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
9487 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
9488 			if (speed == STATUS_SPEED_100
9489 			    || speed == STATUS_SPEED_10)
9490 				phy_reg |= 0x3e8;
9491 			else
9492 				phy_reg |= 0xfa;
9493 			sc->phy.writereg_locked(dev, 2,
9494 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
9495 
9496 			if (speed == STATUS_SPEED_1000) {
9497 				sc->phy.readreg_locked(dev, 2,
9498 				    HV_PM_CTRL, &phy_reg);
9499 
9500 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
9501 
9502 				sc->phy.writereg_locked(dev, 2,
9503 				    HV_PM_CTRL, phy_reg);
9504 			}
9505 		}
9506 		sc->phy.release(sc);
9507 
9508 		if (rv)
9509 			return;
9510 
9511 		if (sc->sc_type >= WM_T_PCH_SPT) {
9512 			uint16_t data, ptr_gap;
9513 
9514 			if (speed == STATUS_SPEED_1000) {
9515 				rv = sc->phy.acquire(sc);
9516 				if (rv)
9517 					return;
9518 
9519 				rv = sc->phy.readreg_locked(dev, 2,
9520 				    I82579_UNKNOWN1, &data);
9521 				if (rv) {
9522 					sc->phy.release(sc);
9523 					return;
9524 				}
9525 
9526 				ptr_gap = (data & (0x3ff << 2)) >> 2;
9527 				if (ptr_gap < 0x18) {
9528 					data &= ~(0x3ff << 2);
9529 					data |= (0x18 << 2);
9530 					rv = sc->phy.writereg_locked(dev,
9531 					    2, I82579_UNKNOWN1, data);
9532 				}
9533 				sc->phy.release(sc);
9534 				if (rv)
9535 					return;
9536 			} else {
9537 				rv = sc->phy.acquire(sc);
9538 				if (rv)
9539 					return;
9540 
9541 				rv = sc->phy.writereg_locked(dev, 2,
9542 				    I82579_UNKNOWN1, 0xc023);
9543 				sc->phy.release(sc);
9544 				if (rv)
9545 					return;
9546 
9547 			}
9548 		}
9549 	}
9550 
9551 	/*
9552 	 * I217 Packet Loss issue:
9553 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
9554 	 * on power up.
9555 	 * Set the Beacon Duration for I217 to 8 usec
9556 	 */
9557 	if (sc->sc_type >= WM_T_PCH_LPT) {
9558 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
9559 		reg &= ~FEXTNVM4_BEACON_DURATION;
9560 		reg |= FEXTNVM4_BEACON_DURATION_8US;
9561 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
9562 	}
9563 
9564 	/* Work-around I218 hang issue */
9565 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
9566 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
9567 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
9568 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
9569 		wm_k1_workaround_lpt_lp(sc, link);
9570 
9571 	if (sc->sc_type >= WM_T_PCH_LPT) {
9572 		/*
9573 		 * Set platform power management values for Latency
9574 		 * Tolerance Reporting (LTR)
9575 		 */
9576 		wm_platform_pm_pch_lpt(sc,
9577 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
9578 	}
9579 
9580 	/* Clear link partner's EEE ability */
9581 	sc->eee_lp_ability = 0;
9582 
9583 	/* FEXTNVM6 K1-off workaround */
9584 	if (sc->sc_type == WM_T_PCH_SPT) {
9585 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
9586 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
9587 			reg |= FEXTNVM6_K1_OFF_ENABLE;
9588 		else
9589 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
9590 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
9591 	}
9592 
9593 	if (!link)
9594 		return;
9595 
9596 	switch (sc->sc_type) {
9597 	case WM_T_PCH2:
9598 		wm_k1_workaround_lv(sc);
9599 		/* FALLTHROUGH */
9600 	case WM_T_PCH:
9601 		if (sc->sc_phytype == WMPHY_82578)
9602 			wm_link_stall_workaround_hv(sc);
9603 		break;
9604 	default:
9605 		break;
9606 	}
9607 
9608 	/* Enable/Disable EEE after link up */
9609 	if (sc->sc_phytype > WMPHY_82579)
9610 		wm_set_eee_pchlan(sc);
9611 }
9612 
9613 /*
9614  * wm_linkintr_tbi:
9615  *
9616  *	Helper; handle link interrupts for TBI mode.
9617  */
9618 static void
9619 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
9620 {
9621 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9622 	uint32_t status;
9623 
9624 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9625 		__func__));
9626 
9627 	status = CSR_READ(sc, WMREG_STATUS);
9628 	if (icr & ICR_LSC) {
9629 		wm_check_for_link(sc);
9630 		if (status & STATUS_LU) {
9631 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
9632 				device_xname(sc->sc_dev),
9633 				(status & STATUS_FD) ? "FDX" : "HDX"));
9634 			/*
9635 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9636 			 * so we should update sc->sc_ctrl
9637 			 */
9638 
9639 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9640 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9641 			sc->sc_fcrtl &= ~FCRTL_XONE;
9642 			if (status & STATUS_FD)
9643 				sc->sc_tctl |=
9644 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9645 			else
9646 				sc->sc_tctl |=
9647 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9648 			if (sc->sc_ctrl & CTRL_TFCE)
9649 				sc->sc_fcrtl |= FCRTL_XONE;
9650 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9651 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9652 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
9653 			sc->sc_tbi_linkup = 1;
9654 			if_link_state_change(ifp, LINK_STATE_UP);
9655 		} else {
9656 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9657 				device_xname(sc->sc_dev)));
9658 			sc->sc_tbi_linkup = 0;
9659 			if_link_state_change(ifp, LINK_STATE_DOWN);
9660 		}
9661 		/* Update LED */
9662 		wm_tbi_serdes_set_linkled(sc);
9663 	} else if (icr & ICR_RXSEQ)
9664 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9665 			device_xname(sc->sc_dev)));
9666 }
9667 
9668 /*
9669  * wm_linkintr_serdes:
9670  *
9671  *	Helper; handle link interrupts for TBI mode.
9672  */
9673 static void
9674 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
9675 {
9676 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9677 	struct mii_data *mii = &sc->sc_mii;
9678 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9679 	uint32_t pcs_adv, pcs_lpab, reg;
9680 
9681 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
9682 		__func__));
9683 
9684 	if (icr & ICR_LSC) {
9685 		/* Check PCS */
9686 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9687 		if ((reg & PCS_LSTS_LINKOK) != 0) {
9688 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
9689 				device_xname(sc->sc_dev)));
9690 			mii->mii_media_status |= IFM_ACTIVE;
9691 			sc->sc_tbi_linkup = 1;
9692 			if_link_state_change(ifp, LINK_STATE_UP);
9693 		} else {
9694 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
9695 				device_xname(sc->sc_dev)));
9696 			mii->mii_media_status |= IFM_NONE;
9697 			sc->sc_tbi_linkup = 0;
9698 			if_link_state_change(ifp, LINK_STATE_DOWN);
9699 			wm_tbi_serdes_set_linkled(sc);
9700 			return;
9701 		}
9702 		mii->mii_media_active |= IFM_1000_SX;
9703 		if ((reg & PCS_LSTS_FDX) != 0)
9704 			mii->mii_media_active |= IFM_FDX;
9705 		else
9706 			mii->mii_media_active |= IFM_HDX;
9707 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9708 			/* Check flow */
9709 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
9710 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
9711 				DPRINTF(sc, WM_DEBUG_LINK,
9712 				    ("XXX LINKOK but not ACOMP\n"));
9713 				return;
9714 			}
9715 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9716 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9717 			DPRINTF(sc, WM_DEBUG_LINK,
9718 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
9719 			if ((pcs_adv & TXCW_SYM_PAUSE)
9720 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9721 				mii->mii_media_active |= IFM_FLOW
9722 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9723 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9724 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9725 			    && (pcs_lpab & TXCW_SYM_PAUSE)
9726 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9727 				mii->mii_media_active |= IFM_FLOW
9728 				    | IFM_ETH_TXPAUSE;
9729 			else if ((pcs_adv & TXCW_SYM_PAUSE)
9730 			    && (pcs_adv & TXCW_ASYM_PAUSE)
9731 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9732 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
9733 				mii->mii_media_active |= IFM_FLOW
9734 				    | IFM_ETH_RXPAUSE;
9735 		}
9736 		/* Update LED */
9737 		wm_tbi_serdes_set_linkled(sc);
9738 	} else
9739 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
9740 		    device_xname(sc->sc_dev)));
9741 }
9742 
9743 /*
9744  * wm_linkintr:
9745  *
9746  *	Helper; handle link interrupts.
9747  */
9748 static void
9749 wm_linkintr(struct wm_softc *sc, uint32_t icr)
9750 {
9751 
9752 	KASSERT(WM_CORE_LOCKED(sc));
9753 
9754 	if (sc->sc_flags & WM_F_HAS_MII)
9755 		wm_linkintr_gmii(sc, icr);
9756 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9757 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
9758 		wm_linkintr_serdes(sc, icr);
9759 	else
9760 		wm_linkintr_tbi(sc, icr);
9761 }
9762 
9763 
9764 static inline void
9765 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
9766 {
9767 
9768 	if (wmq->wmq_txrx_use_workqueue)
9769 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
9770 	else
9771 		softint_schedule(wmq->wmq_si);
9772 }
9773 
9774 /*
9775  * wm_intr_legacy:
9776  *
9777  *	Interrupt service routine for INTx and MSI.
9778  */
9779 static int
9780 wm_intr_legacy(void *arg)
9781 {
9782 	struct wm_softc *sc = arg;
9783 	struct wm_queue *wmq = &sc->sc_queue[0];
9784 	struct wm_txqueue *txq = &wmq->wmq_txq;
9785 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9786 	uint32_t icr, rndval = 0;
9787 	int handled = 0;
9788 
9789 	while (1 /* CONSTCOND */) {
9790 		icr = CSR_READ(sc, WMREG_ICR);
9791 		if ((icr & sc->sc_icr) == 0)
9792 			break;
9793 		if (handled == 0)
9794 			DPRINTF(sc, WM_DEBUG_TX,
9795 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
9796 		if (rndval == 0)
9797 			rndval = icr;
9798 
9799 		mutex_enter(rxq->rxq_lock);
9800 
9801 		if (rxq->rxq_stopping) {
9802 			mutex_exit(rxq->rxq_lock);
9803 			break;
9804 		}
9805 
9806 		handled = 1;
9807 
9808 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9809 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
9810 			DPRINTF(sc, WM_DEBUG_RX,
9811 			    ("%s: RX: got Rx intr 0x%08x\n",
9812 				device_xname(sc->sc_dev),
9813 				icr & (ICR_RXDMT0 | ICR_RXT0)));
9814 			WM_Q_EVCNT_INCR(rxq, intr);
9815 		}
9816 #endif
9817 		/*
9818 		 * wm_rxeof() does *not* call upper layer functions directly,
9819 		 * as if_percpuq_enqueue() just call softint_schedule().
9820 		 * So, we can call wm_rxeof() in interrupt context.
9821 		 */
9822 		wm_rxeof(rxq, UINT_MAX);
9823 
9824 		mutex_exit(rxq->rxq_lock);
9825 		mutex_enter(txq->txq_lock);
9826 
9827 		if (txq->txq_stopping) {
9828 			mutex_exit(txq->txq_lock);
9829 			break;
9830 		}
9831 
9832 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
9833 		if (icr & ICR_TXDW) {
9834 			DPRINTF(sc, WM_DEBUG_TX,
9835 			    ("%s: TX: got TXDW interrupt\n",
9836 				device_xname(sc->sc_dev)));
9837 			WM_Q_EVCNT_INCR(txq, txdw);
9838 		}
9839 #endif
9840 		wm_txeof(txq, UINT_MAX);
9841 
9842 		mutex_exit(txq->txq_lock);
9843 		WM_CORE_LOCK(sc);
9844 
9845 		if (sc->sc_core_stopping) {
9846 			WM_CORE_UNLOCK(sc);
9847 			break;
9848 		}
9849 
9850 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
9851 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9852 			wm_linkintr(sc, icr);
9853 		}
9854 		if ((icr & ICR_GPI(0)) != 0)
9855 			device_printf(sc->sc_dev, "got module interrupt\n");
9856 
9857 		WM_CORE_UNLOCK(sc);
9858 
9859 		if (icr & ICR_RXO) {
9860 #if defined(WM_DEBUG)
9861 			log(LOG_WARNING, "%s: Receive overrun\n",
9862 			    device_xname(sc->sc_dev));
9863 #endif /* defined(WM_DEBUG) */
9864 		}
9865 	}
9866 
9867 	rnd_add_uint32(&sc->rnd_source, rndval);
9868 
9869 	if (handled) {
9870 		/* Try to get more packets going. */
9871 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9872 		wm_sched_handle_queue(sc, wmq);
9873 	}
9874 
9875 	return handled;
9876 }
9877 
9878 static inline void
9879 wm_txrxintr_disable(struct wm_queue *wmq)
9880 {
9881 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9882 
9883 	if (sc->sc_type == WM_T_82574)
9884 		CSR_WRITE(sc, WMREG_IMC,
9885 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9886 	else if (sc->sc_type == WM_T_82575)
9887 		CSR_WRITE(sc, WMREG_EIMC,
9888 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9889 	else
9890 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9891 }
9892 
9893 static inline void
9894 wm_txrxintr_enable(struct wm_queue *wmq)
9895 {
9896 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9897 
9898 	wm_itrs_calculate(sc, wmq);
9899 
9900 	/*
9901 	 * ICR_OTHER which is disabled in wm_linkintr_msix() is enabled here.
9902 	 * There is no need to care about which of RXQ(0) and RXQ(1) enable
9903 	 * ICR_OTHER in first, because each RXQ/TXQ interrupt is disabled
9904 	 * while each wm_handle_queue(wmq) is runnig.
9905 	 */
9906 	if (sc->sc_type == WM_T_82574)
9907 		CSR_WRITE(sc, WMREG_IMS,
9908 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9909 	else if (sc->sc_type == WM_T_82575)
9910 		CSR_WRITE(sc, WMREG_EIMS,
9911 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9912 	else
9913 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9914 }
9915 
9916 static int
9917 wm_txrxintr_msix(void *arg)
9918 {
9919 	struct wm_queue *wmq = arg;
9920 	struct wm_txqueue *txq = &wmq->wmq_txq;
9921 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9922 	struct wm_softc *sc = txq->txq_sc;
9923 	u_int txlimit = sc->sc_tx_intr_process_limit;
9924 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9925 	bool txmore;
9926 	bool rxmore;
9927 
9928 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9929 
9930 	DPRINTF(sc, WM_DEBUG_TX,
9931 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9932 
9933 	wm_txrxintr_disable(wmq);
9934 
9935 	mutex_enter(txq->txq_lock);
9936 
9937 	if (txq->txq_stopping) {
9938 		mutex_exit(txq->txq_lock);
9939 		return 0;
9940 	}
9941 
9942 	WM_Q_EVCNT_INCR(txq, txdw);
9943 	txmore = wm_txeof(txq, txlimit);
9944 	/* wm_deferred start() is done in wm_handle_queue(). */
9945 	mutex_exit(txq->txq_lock);
9946 
9947 	DPRINTF(sc, WM_DEBUG_RX,
9948 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9949 	mutex_enter(rxq->rxq_lock);
9950 
9951 	if (rxq->rxq_stopping) {
9952 		mutex_exit(rxq->rxq_lock);
9953 		return 0;
9954 	}
9955 
9956 	WM_Q_EVCNT_INCR(rxq, intr);
9957 	rxmore = wm_rxeof(rxq, rxlimit);
9958 	mutex_exit(rxq->rxq_lock);
9959 
9960 	wm_itrs_writereg(sc, wmq);
9961 
9962 	if (txmore || rxmore) {
9963 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
9964 		wm_sched_handle_queue(sc, wmq);
9965 	} else
9966 		wm_txrxintr_enable(wmq);
9967 
9968 	return 1;
9969 }
9970 
9971 static void
9972 wm_handle_queue(void *arg)
9973 {
9974 	struct wm_queue *wmq = arg;
9975 	struct wm_txqueue *txq = &wmq->wmq_txq;
9976 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9977 	struct wm_softc *sc = txq->txq_sc;
9978 	u_int txlimit = sc->sc_tx_process_limit;
9979 	u_int rxlimit = sc->sc_rx_process_limit;
9980 	bool txmore;
9981 	bool rxmore;
9982 
9983 	mutex_enter(txq->txq_lock);
9984 	if (txq->txq_stopping) {
9985 		mutex_exit(txq->txq_lock);
9986 		return;
9987 	}
9988 	txmore = wm_txeof(txq, txlimit);
9989 	wm_deferred_start_locked(txq);
9990 	mutex_exit(txq->txq_lock);
9991 
9992 	mutex_enter(rxq->rxq_lock);
9993 	if (rxq->rxq_stopping) {
9994 		mutex_exit(rxq->rxq_lock);
9995 		return;
9996 	}
9997 	WM_Q_EVCNT_INCR(rxq, defer);
9998 	rxmore = wm_rxeof(rxq, rxlimit);
9999 	mutex_exit(rxq->rxq_lock);
10000 
10001 	if (txmore || rxmore) {
10002 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
10003 		wm_sched_handle_queue(sc, wmq);
10004 	} else
10005 		wm_txrxintr_enable(wmq);
10006 }
10007 
10008 static void
10009 wm_handle_queue_work(struct work *wk, void *context)
10010 {
10011 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
10012 
10013 	/*
10014 	 * "enqueued flag" is not required here.
10015 	 */
10016 	wm_handle_queue(wmq);
10017 }
10018 
10019 /*
10020  * wm_linkintr_msix:
10021  *
10022  *	Interrupt service routine for link status change for MSI-X.
10023  */
10024 static int
10025 wm_linkintr_msix(void *arg)
10026 {
10027 	struct wm_softc *sc = arg;
10028 	uint32_t reg;
10029 	bool has_rxo;
10030 
10031 	reg = CSR_READ(sc, WMREG_ICR);
10032 	WM_CORE_LOCK(sc);
10033 	DPRINTF(sc, WM_DEBUG_LINK,
10034 	    ("%s: LINK: got link intr. ICR = %08x\n",
10035 		device_xname(sc->sc_dev), reg));
10036 
10037 	if (sc->sc_core_stopping)
10038 		goto out;
10039 
10040 	if ((reg & ICR_LSC) != 0) {
10041 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
10042 		wm_linkintr(sc, ICR_LSC);
10043 	}
10044 	if ((reg & ICR_GPI(0)) != 0)
10045 		device_printf(sc->sc_dev, "got module interrupt\n");
10046 
10047 	/*
10048 	 * XXX 82574 MSI-X mode workaround
10049 	 *
10050 	 * 82574 MSI-X mode causes receive overrun(RXO) interrupt as ICR_OTHER
10051 	 * MSI-X vector, furthermore it does not cause neigher ICR_RXQ(0) nor
10052 	 * ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
10053 	 * interrupts by writing WMREG_ICS to process receive packets.
10054 	 */
10055 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
10056 #if defined(WM_DEBUG)
10057 		log(LOG_WARNING, "%s: Receive overrun\n",
10058 		    device_xname(sc->sc_dev));
10059 #endif /* defined(WM_DEBUG) */
10060 
10061 		has_rxo = true;
10062 		/*
10063 		 * The RXO interrupt is very high rate when receive traffic is
10064 		 * high rate. We use polling mode for ICR_OTHER like Tx/Rx
10065 		 * interrupts. ICR_OTHER will be enabled at the end of
10066 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
10067 		 * ICR_RXQ(1) interrupts.
10068 		 */
10069 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
10070 
10071 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
10072 	}
10073 
10074 
10075 
10076 out:
10077 	WM_CORE_UNLOCK(sc);
10078 
10079 	if (sc->sc_type == WM_T_82574) {
10080 		if (!has_rxo)
10081 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
10082 		else
10083 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
10084 	} else if (sc->sc_type == WM_T_82575)
10085 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
10086 	else
10087 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
10088 
10089 	return 1;
10090 }
10091 
10092 /*
10093  * Media related.
10094  * GMII, SGMII, TBI (and SERDES)
10095  */
10096 
10097 /* Common */
10098 
10099 /*
10100  * wm_tbi_serdes_set_linkled:
10101  *
10102  *	Update the link LED on TBI and SERDES devices.
10103  */
10104 static void
10105 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
10106 {
10107 
10108 	if (sc->sc_tbi_linkup)
10109 		sc->sc_ctrl |= CTRL_SWDPIN(0);
10110 	else
10111 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
10112 
10113 	/* 82540 or newer devices are active low */
10114 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
10115 
10116 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10117 }
10118 
10119 /* GMII related */
10120 
10121 /*
10122  * wm_gmii_reset:
10123  *
10124  *	Reset the PHY.
10125  */
10126 static void
10127 wm_gmii_reset(struct wm_softc *sc)
10128 {
10129 	uint32_t reg;
10130 	int rv;
10131 
10132 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10133 		device_xname(sc->sc_dev), __func__));
10134 
10135 	rv = sc->phy.acquire(sc);
10136 	if (rv != 0) {
10137 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10138 		    __func__);
10139 		return;
10140 	}
10141 
10142 	switch (sc->sc_type) {
10143 	case WM_T_82542_2_0:
10144 	case WM_T_82542_2_1:
10145 		/* null */
10146 		break;
10147 	case WM_T_82543:
10148 		/*
10149 		 * With 82543, we need to force speed and duplex on the MAC
10150 		 * equal to what the PHY speed and duplex configuration is.
10151 		 * In addition, we need to perform a hardware reset on the PHY
10152 		 * to take it out of reset.
10153 		 */
10154 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10155 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10156 
10157 		/* The PHY reset pin is active-low. */
10158 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
10159 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
10160 		    CTRL_EXT_SWDPIN(4));
10161 		reg |= CTRL_EXT_SWDPIO(4);
10162 
10163 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10164 		CSR_WRITE_FLUSH(sc);
10165 		delay(10*1000);
10166 
10167 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
10168 		CSR_WRITE_FLUSH(sc);
10169 		delay(150);
10170 #if 0
10171 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
10172 #endif
10173 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
10174 		break;
10175 	case WM_T_82544:	/* Reset 10000us */
10176 	case WM_T_82540:
10177 	case WM_T_82545:
10178 	case WM_T_82545_3:
10179 	case WM_T_82546:
10180 	case WM_T_82546_3:
10181 	case WM_T_82541:
10182 	case WM_T_82541_2:
10183 	case WM_T_82547:
10184 	case WM_T_82547_2:
10185 	case WM_T_82571:	/* Reset 100us */
10186 	case WM_T_82572:
10187 	case WM_T_82573:
10188 	case WM_T_82574:
10189 	case WM_T_82575:
10190 	case WM_T_82576:
10191 	case WM_T_82580:
10192 	case WM_T_I350:
10193 	case WM_T_I354:
10194 	case WM_T_I210:
10195 	case WM_T_I211:
10196 	case WM_T_82583:
10197 	case WM_T_80003:
10198 		/* Generic reset */
10199 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10200 		CSR_WRITE_FLUSH(sc);
10201 		delay(20000);
10202 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10203 		CSR_WRITE_FLUSH(sc);
10204 		delay(20000);
10205 
10206 		if ((sc->sc_type == WM_T_82541)
10207 		    || (sc->sc_type == WM_T_82541_2)
10208 		    || (sc->sc_type == WM_T_82547)
10209 		    || (sc->sc_type == WM_T_82547_2)) {
10210 			/* Workaround for igp are done in igp_reset() */
10211 			/* XXX add code to set LED after phy reset */
10212 		}
10213 		break;
10214 	case WM_T_ICH8:
10215 	case WM_T_ICH9:
10216 	case WM_T_ICH10:
10217 	case WM_T_PCH:
10218 	case WM_T_PCH2:
10219 	case WM_T_PCH_LPT:
10220 	case WM_T_PCH_SPT:
10221 	case WM_T_PCH_CNP:
10222 		/* Generic reset */
10223 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
10224 		CSR_WRITE_FLUSH(sc);
10225 		delay(100);
10226 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10227 		CSR_WRITE_FLUSH(sc);
10228 		delay(150);
10229 		break;
10230 	default:
10231 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
10232 		    __func__);
10233 		break;
10234 	}
10235 
10236 	sc->phy.release(sc);
10237 
10238 	/* get_cfg_done */
10239 	wm_get_cfg_done(sc);
10240 
10241 	/* Extra setup */
10242 	switch (sc->sc_type) {
10243 	case WM_T_82542_2_0:
10244 	case WM_T_82542_2_1:
10245 	case WM_T_82543:
10246 	case WM_T_82544:
10247 	case WM_T_82540:
10248 	case WM_T_82545:
10249 	case WM_T_82545_3:
10250 	case WM_T_82546:
10251 	case WM_T_82546_3:
10252 	case WM_T_82541_2:
10253 	case WM_T_82547_2:
10254 	case WM_T_82571:
10255 	case WM_T_82572:
10256 	case WM_T_82573:
10257 	case WM_T_82574:
10258 	case WM_T_82583:
10259 	case WM_T_82575:
10260 	case WM_T_82576:
10261 	case WM_T_82580:
10262 	case WM_T_I350:
10263 	case WM_T_I354:
10264 	case WM_T_I210:
10265 	case WM_T_I211:
10266 	case WM_T_80003:
10267 		/* Null */
10268 		break;
10269 	case WM_T_82541:
10270 	case WM_T_82547:
10271 		/* XXX Configure actively LED after PHY reset */
10272 		break;
10273 	case WM_T_ICH8:
10274 	case WM_T_ICH9:
10275 	case WM_T_ICH10:
10276 	case WM_T_PCH:
10277 	case WM_T_PCH2:
10278 	case WM_T_PCH_LPT:
10279 	case WM_T_PCH_SPT:
10280 	case WM_T_PCH_CNP:
10281 		wm_phy_post_reset(sc);
10282 		break;
10283 	default:
10284 		panic("%s: unknown type\n", __func__);
10285 		break;
10286 	}
10287 }
10288 
10289 /*
10290  * Setup sc_phytype and mii_{read|write}reg.
10291  *
10292  *  To identify PHY type, correct read/write function should be selected.
10293  * To select correct read/write function, PCI ID or MAC type are required
10294  * without accessing PHY registers.
10295  *
10296  *  On the first call of this function, PHY ID is not known yet. Check
10297  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
10298  * result might be incorrect.
10299  *
10300  *  In the second call, PHY OUI and model is used to identify PHY type.
10301  * It might not be perfect because of the lack of compared entry, but it
10302  * would be better than the first call.
10303  *
10304  *  If the detected new result and previous assumption is different,
10305  * diagnous message will be printed.
10306  */
10307 static void
10308 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
10309     uint16_t phy_model)
10310 {
10311 	device_t dev = sc->sc_dev;
10312 	struct mii_data *mii = &sc->sc_mii;
10313 	uint16_t new_phytype = WMPHY_UNKNOWN;
10314 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
10315 	mii_readreg_t new_readreg;
10316 	mii_writereg_t new_writereg;
10317 	bool dodiag = true;
10318 
10319 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
10320 		device_xname(sc->sc_dev), __func__));
10321 
10322 	/*
10323 	 * 1000BASE-T SFP uses SGMII and the first asumed PHY type is always
10324 	 * incorrect. So don't print diag output when it's 2nd call.
10325 	 */
10326 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
10327 		dodiag = false;
10328 
10329 	if (mii->mii_readreg == NULL) {
10330 		/*
10331 		 *  This is the first call of this function. For ICH and PCH
10332 		 * variants, it's difficult to determine the PHY access method
10333 		 * by sc_type, so use the PCI product ID for some devices.
10334 		 */
10335 
10336 		switch (sc->sc_pcidevid) {
10337 		case PCI_PRODUCT_INTEL_PCH_M_LM:
10338 		case PCI_PRODUCT_INTEL_PCH_M_LC:
10339 			/* 82577 */
10340 			new_phytype = WMPHY_82577;
10341 			break;
10342 		case PCI_PRODUCT_INTEL_PCH_D_DM:
10343 		case PCI_PRODUCT_INTEL_PCH_D_DC:
10344 			/* 82578 */
10345 			new_phytype = WMPHY_82578;
10346 			break;
10347 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
10348 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
10349 			/* 82579 */
10350 			new_phytype = WMPHY_82579;
10351 			break;
10352 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
10353 		case PCI_PRODUCT_INTEL_82801I_BM:
10354 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
10355 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
10356 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
10357 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
10358 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
10359 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
10360 			/* ICH8, 9, 10 with 82567 */
10361 			new_phytype = WMPHY_BM;
10362 			break;
10363 		default:
10364 			break;
10365 		}
10366 	} else {
10367 		/* It's not the first call. Use PHY OUI and model */
10368 		switch (phy_oui) {
10369 		case MII_OUI_ATTANSIC: /* XXX ??? */
10370 			switch (phy_model) {
10371 			case 0x0004: /* XXX */
10372 				new_phytype = WMPHY_82578;
10373 				break;
10374 			default:
10375 				break;
10376 			}
10377 			break;
10378 		case MII_OUI_xxMARVELL:
10379 			switch (phy_model) {
10380 			case MII_MODEL_xxMARVELL_I210:
10381 				new_phytype = WMPHY_I210;
10382 				break;
10383 			case MII_MODEL_xxMARVELL_E1011:
10384 			case MII_MODEL_xxMARVELL_E1000_3:
10385 			case MII_MODEL_xxMARVELL_E1000_5:
10386 			case MII_MODEL_xxMARVELL_E1112:
10387 				new_phytype = WMPHY_M88;
10388 				break;
10389 			case MII_MODEL_xxMARVELL_E1149:
10390 				new_phytype = WMPHY_BM;
10391 				break;
10392 			case MII_MODEL_xxMARVELL_E1111:
10393 			case MII_MODEL_xxMARVELL_I347:
10394 			case MII_MODEL_xxMARVELL_E1512:
10395 			case MII_MODEL_xxMARVELL_E1340M:
10396 			case MII_MODEL_xxMARVELL_E1543:
10397 				new_phytype = WMPHY_M88;
10398 				break;
10399 			case MII_MODEL_xxMARVELL_I82563:
10400 				new_phytype = WMPHY_GG82563;
10401 				break;
10402 			default:
10403 				break;
10404 			}
10405 			break;
10406 		case MII_OUI_INTEL:
10407 			switch (phy_model) {
10408 			case MII_MODEL_INTEL_I82577:
10409 				new_phytype = WMPHY_82577;
10410 				break;
10411 			case MII_MODEL_INTEL_I82579:
10412 				new_phytype = WMPHY_82579;
10413 				break;
10414 			case MII_MODEL_INTEL_I217:
10415 				new_phytype = WMPHY_I217;
10416 				break;
10417 			case MII_MODEL_INTEL_I82580:
10418 				new_phytype = WMPHY_82580;
10419 				break;
10420 			case MII_MODEL_INTEL_I350:
10421 				new_phytype = WMPHY_I350;
10422 				break;
10423 				break;
10424 			default:
10425 				break;
10426 			}
10427 			break;
10428 		case MII_OUI_yyINTEL:
10429 			switch (phy_model) {
10430 			case MII_MODEL_yyINTEL_I82562G:
10431 			case MII_MODEL_yyINTEL_I82562EM:
10432 			case MII_MODEL_yyINTEL_I82562ET:
10433 				new_phytype = WMPHY_IFE;
10434 				break;
10435 			case MII_MODEL_yyINTEL_IGP01E1000:
10436 				new_phytype = WMPHY_IGP;
10437 				break;
10438 			case MII_MODEL_yyINTEL_I82566:
10439 				new_phytype = WMPHY_IGP_3;
10440 				break;
10441 			default:
10442 				break;
10443 			}
10444 			break;
10445 		default:
10446 			break;
10447 		}
10448 
10449 		if (dodiag) {
10450 			if (new_phytype == WMPHY_UNKNOWN)
10451 				aprint_verbose_dev(dev,
10452 				    "%s: Unknown PHY model. OUI=%06x, "
10453 				    "model=%04x\n", __func__, phy_oui,
10454 				    phy_model);
10455 
10456 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
10457 			    && (sc->sc_phytype != new_phytype)) {
10458 				aprint_error_dev(dev, "Previously assumed PHY "
10459 				    "type(%u) was incorrect. PHY type from PHY"
10460 				    "ID = %u\n", sc->sc_phytype, new_phytype);
10461 			}
10462 		}
10463 	}
10464 
10465 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
10466 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
10467 		/* SGMII */
10468 		new_readreg = wm_sgmii_readreg;
10469 		new_writereg = wm_sgmii_writereg;
10470 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
10471 		/* BM2 (phyaddr == 1) */
10472 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10473 		    && (new_phytype != WMPHY_BM)
10474 		    && (new_phytype != WMPHY_UNKNOWN))
10475 			doubt_phytype = new_phytype;
10476 		new_phytype = WMPHY_BM;
10477 		new_readreg = wm_gmii_bm_readreg;
10478 		new_writereg = wm_gmii_bm_writereg;
10479 	} else if (sc->sc_type >= WM_T_PCH) {
10480 		/* All PCH* use _hv_ */
10481 		new_readreg = wm_gmii_hv_readreg;
10482 		new_writereg = wm_gmii_hv_writereg;
10483 	} else if (sc->sc_type >= WM_T_ICH8) {
10484 		/* non-82567 ICH8, 9 and 10 */
10485 		new_readreg = wm_gmii_i82544_readreg;
10486 		new_writereg = wm_gmii_i82544_writereg;
10487 	} else if (sc->sc_type >= WM_T_80003) {
10488 		/* 80003 */
10489 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10490 		    && (new_phytype != WMPHY_GG82563)
10491 		    && (new_phytype != WMPHY_UNKNOWN))
10492 			doubt_phytype = new_phytype;
10493 		new_phytype = WMPHY_GG82563;
10494 		new_readreg = wm_gmii_i80003_readreg;
10495 		new_writereg = wm_gmii_i80003_writereg;
10496 	} else if (sc->sc_type >= WM_T_I210) {
10497 		/* I210 and I211 */
10498 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
10499 		    && (new_phytype != WMPHY_I210)
10500 		    && (new_phytype != WMPHY_UNKNOWN))
10501 			doubt_phytype = new_phytype;
10502 		new_phytype = WMPHY_I210;
10503 		new_readreg = wm_gmii_gs40g_readreg;
10504 		new_writereg = wm_gmii_gs40g_writereg;
10505 	} else if (sc->sc_type >= WM_T_82580) {
10506 		/* 82580, I350 and I354 */
10507 		new_readreg = wm_gmii_82580_readreg;
10508 		new_writereg = wm_gmii_82580_writereg;
10509 	} else if (sc->sc_type >= WM_T_82544) {
10510 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
10511 		new_readreg = wm_gmii_i82544_readreg;
10512 		new_writereg = wm_gmii_i82544_writereg;
10513 	} else {
10514 		new_readreg = wm_gmii_i82543_readreg;
10515 		new_writereg = wm_gmii_i82543_writereg;
10516 	}
10517 
10518 	if (new_phytype == WMPHY_BM) {
10519 		/* All BM use _bm_ */
10520 		new_readreg = wm_gmii_bm_readreg;
10521 		new_writereg = wm_gmii_bm_writereg;
10522 	}
10523 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
10524 		/* All PCH* use _hv_ */
10525 		new_readreg = wm_gmii_hv_readreg;
10526 		new_writereg = wm_gmii_hv_writereg;
10527 	}
10528 
10529 	/* Diag output */
10530 	if (dodiag) {
10531 		if (doubt_phytype != WMPHY_UNKNOWN)
10532 			aprint_error_dev(dev, "Assumed new PHY type was "
10533 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
10534 			    new_phytype);
10535 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
10536 		    && (sc->sc_phytype != new_phytype))
10537 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
10538 			    "was incorrect. New PHY type = %u\n",
10539 			    sc->sc_phytype, new_phytype);
10540 
10541 		if ((mii->mii_readreg != NULL) &&
10542 		    (new_phytype == WMPHY_UNKNOWN))
10543 			aprint_error_dev(dev, "PHY type is still unknown.\n");
10544 
10545 		if ((mii->mii_readreg != NULL) &&
10546 		    (mii->mii_readreg != new_readreg))
10547 			aprint_error_dev(dev, "Previously assumed PHY "
10548 			    "read/write function was incorrect.\n");
10549 	}
10550 
10551 	/* Update now */
10552 	sc->sc_phytype = new_phytype;
10553 	mii->mii_readreg = new_readreg;
10554 	mii->mii_writereg = new_writereg;
10555 	if (new_readreg == wm_gmii_hv_readreg) {
10556 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
10557 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
10558 	} else if (new_readreg == wm_sgmii_readreg) {
10559 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
10560 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
10561 	} else if (new_readreg == wm_gmii_i82544_readreg) {
10562 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
10563 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
10564 	}
10565 }
10566 
10567 /*
10568  * wm_get_phy_id_82575:
10569  *
10570  * Return PHY ID. Return -1 if it failed.
10571  */
10572 static int
10573 wm_get_phy_id_82575(struct wm_softc *sc)
10574 {
10575 	uint32_t reg;
10576 	int phyid = -1;
10577 
10578 	/* XXX */
10579 	if ((sc->sc_flags & WM_F_SGMII) == 0)
10580 		return -1;
10581 
10582 	if (wm_sgmii_uses_mdio(sc)) {
10583 		switch (sc->sc_type) {
10584 		case WM_T_82575:
10585 		case WM_T_82576:
10586 			reg = CSR_READ(sc, WMREG_MDIC);
10587 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
10588 			break;
10589 		case WM_T_82580:
10590 		case WM_T_I350:
10591 		case WM_T_I354:
10592 		case WM_T_I210:
10593 		case WM_T_I211:
10594 			reg = CSR_READ(sc, WMREG_MDICNFG);
10595 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
10596 			break;
10597 		default:
10598 			return -1;
10599 		}
10600 	}
10601 
10602 	return phyid;
10603 }
10604 
10605 /*
10606  * wm_gmii_mediainit:
10607  *
10608  *	Initialize media for use on 1000BASE-T devices.
10609  */
10610 static void
10611 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
10612 {
10613 	device_t dev = sc->sc_dev;
10614 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10615 	struct mii_data *mii = &sc->sc_mii;
10616 
10617 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10618 		device_xname(sc->sc_dev), __func__));
10619 
10620 	/* We have GMII. */
10621 	sc->sc_flags |= WM_F_HAS_MII;
10622 
10623 	if (sc->sc_type == WM_T_80003)
10624 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
10625 	else
10626 		sc->sc_tipg = TIPG_1000T_DFLT;
10627 
10628 	/*
10629 	 * Let the chip set speed/duplex on its own based on
10630 	 * signals from the PHY.
10631 	 * XXXbouyer - I'm not sure this is right for the 80003,
10632 	 * the em driver only sets CTRL_SLU here - but it seems to work.
10633 	 */
10634 	sc->sc_ctrl |= CTRL_SLU;
10635 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10636 
10637 	/* Initialize our media structures and probe the GMII. */
10638 	mii->mii_ifp = ifp;
10639 
10640 	mii->mii_statchg = wm_gmii_statchg;
10641 
10642 	/* get PHY control from SMBus to PCIe */
10643 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
10644 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
10645 	    || (sc->sc_type == WM_T_PCH_CNP))
10646 		wm_init_phy_workarounds_pchlan(sc);
10647 
10648 	wm_gmii_reset(sc);
10649 
10650 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10651 	ifmedia_init_with_lock(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
10652 	    wm_gmii_mediastatus, sc->sc_core_lock);
10653 
10654 	/* Setup internal SGMII PHY for SFP */
10655 	wm_sgmii_sfp_preconfig(sc);
10656 
10657 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
10658 	    || (sc->sc_type == WM_T_82580)
10659 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
10660 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
10661 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
10662 			/* Attach only one port */
10663 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
10664 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
10665 		} else {
10666 			int i, id;
10667 			uint32_t ctrl_ext;
10668 
10669 			id = wm_get_phy_id_82575(sc);
10670 			if (id != -1) {
10671 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
10672 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
10673 			}
10674 			if ((id == -1)
10675 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
10676 				/* Power on sgmii phy if it is disabled */
10677 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10678 				CSR_WRITE(sc, WMREG_CTRL_EXT,
10679 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
10680 				CSR_WRITE_FLUSH(sc);
10681 				delay(300*1000); /* XXX too long */
10682 
10683 				/*
10684 				 * From 1 to 8.
10685 				 *
10686 				 * I2C access fails with I2C register's ERROR
10687 				 * bit set, so prevent error message while
10688 				 * scanning.
10689 				 */
10690 				sc->phy.no_errprint = true;
10691 				for (i = 1; i < 8; i++)
10692 					mii_attach(sc->sc_dev, &sc->sc_mii,
10693 					    0xffffffff, i, MII_OFFSET_ANY,
10694 					    MIIF_DOPAUSE);
10695 				sc->phy.no_errprint = false;
10696 
10697 				/* Restore previous sfp cage power state */
10698 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10699 			}
10700 		}
10701 	} else
10702 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10703 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10704 
10705 	/*
10706 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
10707 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
10708 	 */
10709 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
10710 		|| (sc->sc_type == WM_T_PCH_SPT)
10711 		|| (sc->sc_type == WM_T_PCH_CNP))
10712 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
10713 		wm_set_mdio_slow_mode_hv(sc);
10714 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10715 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10716 	}
10717 
10718 	/*
10719 	 * (For ICH8 variants)
10720 	 * If PHY detection failed, use BM's r/w function and retry.
10721 	 */
10722 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10723 		/* if failed, retry with *_bm_* */
10724 		aprint_verbose_dev(dev, "Assumed PHY access function "
10725 		    "(type = %d) might be incorrect. Use BM and retry.\n",
10726 		    sc->sc_phytype);
10727 		sc->sc_phytype = WMPHY_BM;
10728 		mii->mii_readreg = wm_gmii_bm_readreg;
10729 		mii->mii_writereg = wm_gmii_bm_writereg;
10730 
10731 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
10732 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
10733 	}
10734 
10735 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
10736 		/* Any PHY wasn't find */
10737 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
10738 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
10739 		sc->sc_phytype = WMPHY_NONE;
10740 	} else {
10741 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
10742 
10743 		/*
10744 		 * PHY Found! Check PHY type again by the second call of
10745 		 * wm_gmii_setup_phytype.
10746 		 */
10747 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
10748 		    child->mii_mpd_model);
10749 
10750 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
10751 	}
10752 }
10753 
10754 /*
10755  * wm_gmii_mediachange:	[ifmedia interface function]
10756  *
10757  *	Set hardware to newly-selected media on a 1000BASE-T device.
10758  */
10759 static int
10760 wm_gmii_mediachange(struct ifnet *ifp)
10761 {
10762 	struct wm_softc *sc = ifp->if_softc;
10763 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10764 	uint32_t reg;
10765 	int rc;
10766 
10767 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
10768 		device_xname(sc->sc_dev), __func__));
10769 	if ((ifp->if_flags & IFF_UP) == 0)
10770 		return 0;
10771 
10772 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
10773 	if ((sc->sc_type == WM_T_82580)
10774 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
10775 	    || (sc->sc_type == WM_T_I211)) {
10776 		reg = CSR_READ(sc, WMREG_PHPM);
10777 		reg &= ~PHPM_GO_LINK_D;
10778 		CSR_WRITE(sc, WMREG_PHPM, reg);
10779 	}
10780 
10781 	/* Disable D0 LPLU. */
10782 	wm_lplu_d0_disable(sc);
10783 
10784 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
10785 	sc->sc_ctrl |= CTRL_SLU;
10786 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10787 	    || (sc->sc_type > WM_T_82543)) {
10788 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
10789 	} else {
10790 		sc->sc_ctrl &= ~CTRL_ASDE;
10791 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
10792 		if (ife->ifm_media & IFM_FDX)
10793 			sc->sc_ctrl |= CTRL_FD;
10794 		switch (IFM_SUBTYPE(ife->ifm_media)) {
10795 		case IFM_10_T:
10796 			sc->sc_ctrl |= CTRL_SPEED_10;
10797 			break;
10798 		case IFM_100_TX:
10799 			sc->sc_ctrl |= CTRL_SPEED_100;
10800 			break;
10801 		case IFM_1000_T:
10802 			sc->sc_ctrl |= CTRL_SPEED_1000;
10803 			break;
10804 		case IFM_NONE:
10805 			/* There is no specific setting for IFM_NONE */
10806 			break;
10807 		default:
10808 			panic("wm_gmii_mediachange: bad media 0x%x",
10809 			    ife->ifm_media);
10810 		}
10811 	}
10812 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10813 	CSR_WRITE_FLUSH(sc);
10814 
10815 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
10816 		wm_serdes_mediachange(ifp);
10817 
10818 	if (sc->sc_type <= WM_T_82543)
10819 		wm_gmii_reset(sc);
10820 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
10821 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
10822 		/* allow time for SFP cage time to power up phy */
10823 		delay(300 * 1000);
10824 		wm_gmii_reset(sc);
10825 	}
10826 
10827 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
10828 		return 0;
10829 	return rc;
10830 }
10831 
10832 /*
10833  * wm_gmii_mediastatus:	[ifmedia interface function]
10834  *
10835  *	Get the current interface media status on a 1000BASE-T device.
10836  */
10837 static void
10838 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10839 {
10840 	struct wm_softc *sc = ifp->if_softc;
10841 
10842 	ether_mediastatus(ifp, ifmr);
10843 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10844 	    | sc->sc_flowflags;
10845 }
10846 
10847 #define	MDI_IO		CTRL_SWDPIN(2)
10848 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
10849 #define	MDI_CLK		CTRL_SWDPIN(3)
10850 
10851 static void
10852 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
10853 {
10854 	uint32_t i, v;
10855 
10856 	v = CSR_READ(sc, WMREG_CTRL);
10857 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10858 	v |= MDI_DIR | CTRL_SWDPIO(3);
10859 
10860 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
10861 		if (data & i)
10862 			v |= MDI_IO;
10863 		else
10864 			v &= ~MDI_IO;
10865 		CSR_WRITE(sc, WMREG_CTRL, v);
10866 		CSR_WRITE_FLUSH(sc);
10867 		delay(10);
10868 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10869 		CSR_WRITE_FLUSH(sc);
10870 		delay(10);
10871 		CSR_WRITE(sc, WMREG_CTRL, v);
10872 		CSR_WRITE_FLUSH(sc);
10873 		delay(10);
10874 	}
10875 }
10876 
10877 static uint16_t
10878 wm_i82543_mii_recvbits(struct wm_softc *sc)
10879 {
10880 	uint32_t v, i;
10881 	uint16_t data = 0;
10882 
10883 	v = CSR_READ(sc, WMREG_CTRL);
10884 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
10885 	v |= CTRL_SWDPIO(3);
10886 
10887 	CSR_WRITE(sc, WMREG_CTRL, v);
10888 	CSR_WRITE_FLUSH(sc);
10889 	delay(10);
10890 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10891 	CSR_WRITE_FLUSH(sc);
10892 	delay(10);
10893 	CSR_WRITE(sc, WMREG_CTRL, v);
10894 	CSR_WRITE_FLUSH(sc);
10895 	delay(10);
10896 
10897 	for (i = 0; i < 16; i++) {
10898 		data <<= 1;
10899 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10900 		CSR_WRITE_FLUSH(sc);
10901 		delay(10);
10902 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
10903 			data |= 1;
10904 		CSR_WRITE(sc, WMREG_CTRL, v);
10905 		CSR_WRITE_FLUSH(sc);
10906 		delay(10);
10907 	}
10908 
10909 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
10910 	CSR_WRITE_FLUSH(sc);
10911 	delay(10);
10912 	CSR_WRITE(sc, WMREG_CTRL, v);
10913 	CSR_WRITE_FLUSH(sc);
10914 	delay(10);
10915 
10916 	return data;
10917 }
10918 
10919 #undef MDI_IO
10920 #undef MDI_DIR
10921 #undef MDI_CLK
10922 
10923 /*
10924  * wm_gmii_i82543_readreg:	[mii interface function]
10925  *
10926  *	Read a PHY register on the GMII (i82543 version).
10927  */
10928 static int
10929 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
10930 {
10931 	struct wm_softc *sc = device_private(dev);
10932 
10933 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10934 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
10935 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
10936 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
10937 
10938 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
10939 		device_xname(dev), phy, reg, *val));
10940 
10941 	return 0;
10942 }
10943 
10944 /*
10945  * wm_gmii_i82543_writereg:	[mii interface function]
10946  *
10947  *	Write a PHY register on the GMII (i82543 version).
10948  */
10949 static int
10950 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
10951 {
10952 	struct wm_softc *sc = device_private(dev);
10953 
10954 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10955 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10956 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10957 	    (MII_COMMAND_START << 30), 32);
10958 
10959 	return 0;
10960 }
10961 
10962 /*
10963  * wm_gmii_mdic_readreg:	[mii interface function]
10964  *
10965  *	Read a PHY register on the GMII.
10966  */
10967 static int
10968 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
10969 {
10970 	struct wm_softc *sc = device_private(dev);
10971 	uint32_t mdic = 0;
10972 	int i;
10973 
10974 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
10975 	    && (reg > MII_ADDRMASK)) {
10976 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10977 		    __func__, sc->sc_phytype, reg);
10978 		reg &= MII_ADDRMASK;
10979 	}
10980 
10981 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10982 	    MDIC_REGADD(reg));
10983 
10984 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10985 		delay(50);
10986 		mdic = CSR_READ(sc, WMREG_MDIC);
10987 		if (mdic & MDIC_READY)
10988 			break;
10989 	}
10990 
10991 	if ((mdic & MDIC_READY) == 0) {
10992 		DPRINTF(sc, WM_DEBUG_GMII,
10993 		    ("%s: MDIC read timed out: phy %d reg %d\n",
10994 			device_xname(dev), phy, reg));
10995 		return ETIMEDOUT;
10996 	} else if (mdic & MDIC_E) {
10997 		/* This is normal if no PHY is present. */
10998 		DPRINTF(sc, WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
10999 			device_xname(sc->sc_dev), phy, reg));
11000 		return -1;
11001 	} else
11002 		*val = MDIC_DATA(mdic);
11003 
11004 	/*
11005 	 * Allow some time after each MDIC transaction to avoid
11006 	 * reading duplicate data in the next MDIC transaction.
11007 	 */
11008 	if (sc->sc_type == WM_T_PCH2)
11009 		delay(100);
11010 
11011 	return 0;
11012 }
11013 
11014 /*
11015  * wm_gmii_mdic_writereg:	[mii interface function]
11016  *
11017  *	Write a PHY register on the GMII.
11018  */
11019 static int
11020 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
11021 {
11022 	struct wm_softc *sc = device_private(dev);
11023 	uint32_t mdic = 0;
11024 	int i;
11025 
11026 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
11027 	    && (reg > MII_ADDRMASK)) {
11028 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11029 		    __func__, sc->sc_phytype, reg);
11030 		reg &= MII_ADDRMASK;
11031 	}
11032 
11033 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
11034 	    MDIC_REGADD(reg) | MDIC_DATA(val));
11035 
11036 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
11037 		delay(50);
11038 		mdic = CSR_READ(sc, WMREG_MDIC);
11039 		if (mdic & MDIC_READY)
11040 			break;
11041 	}
11042 
11043 	if ((mdic & MDIC_READY) == 0) {
11044 		DPRINTF(sc, WM_DEBUG_GMII,
11045 		    ("%s: MDIC write timed out: phy %d reg %d\n",
11046 			device_xname(dev), phy, reg));
11047 		return ETIMEDOUT;
11048 	} else if (mdic & MDIC_E) {
11049 		DPRINTF(sc, WM_DEBUG_GMII,
11050 		    ("%s: MDIC write error: phy %d reg %d\n",
11051 			device_xname(dev), phy, reg));
11052 		return -1;
11053 	}
11054 
11055 	/*
11056 	 * Allow some time after each MDIC transaction to avoid
11057 	 * reading duplicate data in the next MDIC transaction.
11058 	 */
11059 	if (sc->sc_type == WM_T_PCH2)
11060 		delay(100);
11061 
11062 	return 0;
11063 }
11064 
11065 /*
11066  * wm_gmii_i82544_readreg:	[mii interface function]
11067  *
11068  *	Read a PHY register on the GMII.
11069  */
11070 static int
11071 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
11072 {
11073 	struct wm_softc *sc = device_private(dev);
11074 	int rv;
11075 
11076 	if (sc->phy.acquire(sc)) {
11077 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11078 		return -1;
11079 	}
11080 
11081 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
11082 
11083 	sc->phy.release(sc);
11084 
11085 	return rv;
11086 }
11087 
11088 static int
11089 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11090 {
11091 	struct wm_softc *sc = device_private(dev);
11092 	int rv;
11093 
11094 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11095 		switch (sc->sc_phytype) {
11096 		case WMPHY_IGP:
11097 		case WMPHY_IGP_2:
11098 		case WMPHY_IGP_3:
11099 			rv = wm_gmii_mdic_writereg(dev, phy,
11100 			    IGPHY_PAGE_SELECT, reg);
11101 			if (rv != 0)
11102 				return rv;
11103 			break;
11104 		default:
11105 #ifdef WM_DEBUG
11106 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
11107 			    __func__, sc->sc_phytype, reg);
11108 #endif
11109 			break;
11110 		}
11111 	}
11112 
11113 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11114 }
11115 
11116 /*
11117  * wm_gmii_i82544_writereg:	[mii interface function]
11118  *
11119  *	Write a PHY register on the GMII.
11120  */
11121 static int
11122 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
11123 {
11124 	struct wm_softc *sc = device_private(dev);
11125 	int rv;
11126 
11127 	if (sc->phy.acquire(sc)) {
11128 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11129 		return -1;
11130 	}
11131 
11132 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
11133 	sc->phy.release(sc);
11134 
11135 	return rv;
11136 }
11137 
11138 static int
11139 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11140 {
11141 	struct wm_softc *sc = device_private(dev);
11142 	int rv;
11143 
11144 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11145 		switch (sc->sc_phytype) {
11146 		case WMPHY_IGP:
11147 		case WMPHY_IGP_2:
11148 		case WMPHY_IGP_3:
11149 			rv = wm_gmii_mdic_writereg(dev, phy,
11150 			    IGPHY_PAGE_SELECT, reg);
11151 			if (rv != 0)
11152 				return rv;
11153 			break;
11154 		default:
11155 #ifdef WM_DEBUG
11156 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
11157 			    __func__, sc->sc_phytype, reg);
11158 #endif
11159 			break;
11160 		}
11161 	}
11162 
11163 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11164 }
11165 
11166 /*
11167  * wm_gmii_i80003_readreg:	[mii interface function]
11168  *
11169  *	Read a PHY register on the kumeran
11170  * This could be handled by the PHY layer if we didn't have to lock the
11171  * resource ...
11172  */
11173 static int
11174 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
11175 {
11176 	struct wm_softc *sc = device_private(dev);
11177 	int page_select;
11178 	uint16_t temp, temp2;
11179 	int rv = 0;
11180 
11181 	if (phy != 1) /* Only one PHY on kumeran bus */
11182 		return -1;
11183 
11184 	if (sc->phy.acquire(sc)) {
11185 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11186 		return -1;
11187 	}
11188 
11189 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11190 		page_select = GG82563_PHY_PAGE_SELECT;
11191 	else {
11192 		/*
11193 		 * Use Alternative Page Select register to access registers
11194 		 * 30 and 31.
11195 		 */
11196 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11197 	}
11198 	temp = reg >> GG82563_PAGE_SHIFT;
11199 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11200 		goto out;
11201 
11202 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11203 		/*
11204 		 * Wait more 200us for a bug of the ready bit in the MDIC
11205 		 * register.
11206 		 */
11207 		delay(200);
11208 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11209 		if ((rv != 0) || (temp2 != temp)) {
11210 			device_printf(dev, "%s failed\n", __func__);
11211 			rv = -1;
11212 			goto out;
11213 		}
11214 		delay(200);
11215 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11216 		delay(200);
11217 	} else
11218 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11219 
11220 out:
11221 	sc->phy.release(sc);
11222 	return rv;
11223 }
11224 
11225 /*
11226  * wm_gmii_i80003_writereg:	[mii interface function]
11227  *
11228  *	Write a PHY register on the kumeran.
11229  * This could be handled by the PHY layer if we didn't have to lock the
11230  * resource ...
11231  */
11232 static int
11233 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
11234 {
11235 	struct wm_softc *sc = device_private(dev);
11236 	int page_select, rv;
11237 	uint16_t temp, temp2;
11238 
11239 	if (phy != 1) /* Only one PHY on kumeran bus */
11240 		return -1;
11241 
11242 	if (sc->phy.acquire(sc)) {
11243 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11244 		return -1;
11245 	}
11246 
11247 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
11248 		page_select = GG82563_PHY_PAGE_SELECT;
11249 	else {
11250 		/*
11251 		 * Use Alternative Page Select register to access registers
11252 		 * 30 and 31.
11253 		 */
11254 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
11255 	}
11256 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
11257 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
11258 		goto out;
11259 
11260 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
11261 		/*
11262 		 * Wait more 200us for a bug of the ready bit in the MDIC
11263 		 * register.
11264 		 */
11265 		delay(200);
11266 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
11267 		if ((rv != 0) || (temp2 != temp)) {
11268 			device_printf(dev, "%s failed\n", __func__);
11269 			rv = -1;
11270 			goto out;
11271 		}
11272 		delay(200);
11273 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11274 		delay(200);
11275 	} else
11276 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11277 
11278 out:
11279 	sc->phy.release(sc);
11280 	return rv;
11281 }
11282 
11283 /*
11284  * wm_gmii_bm_readreg:	[mii interface function]
11285  *
11286  *	Read a PHY register on the kumeran
11287  * This could be handled by the PHY layer if we didn't have to lock the
11288  * resource ...
11289  */
11290 static int
11291 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
11292 {
11293 	struct wm_softc *sc = device_private(dev);
11294 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11295 	int rv;
11296 
11297 	if (sc->phy.acquire(sc)) {
11298 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11299 		return -1;
11300 	}
11301 
11302 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11303 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11304 		    || (reg == 31)) ? 1 : phy;
11305 	/* Page 800 works differently than the rest so it has its own func */
11306 	if (page == BM_WUC_PAGE) {
11307 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11308 		goto release;
11309 	}
11310 
11311 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11312 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11313 		    && (sc->sc_type != WM_T_82583))
11314 			rv = wm_gmii_mdic_writereg(dev, phy,
11315 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11316 		else
11317 			rv = wm_gmii_mdic_writereg(dev, phy,
11318 			    BME1000_PHY_PAGE_SELECT, page);
11319 		if (rv != 0)
11320 			goto release;
11321 	}
11322 
11323 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
11324 
11325 release:
11326 	sc->phy.release(sc);
11327 	return rv;
11328 }
11329 
11330 /*
11331  * wm_gmii_bm_writereg:	[mii interface function]
11332  *
11333  *	Write a PHY register on the kumeran.
11334  * This could be handled by the PHY layer if we didn't have to lock the
11335  * resource ...
11336  */
11337 static int
11338 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
11339 {
11340 	struct wm_softc *sc = device_private(dev);
11341 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
11342 	int rv;
11343 
11344 	if (sc->phy.acquire(sc)) {
11345 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11346 		return -1;
11347 	}
11348 
11349 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
11350 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
11351 		    || (reg == 31)) ? 1 : phy;
11352 	/* Page 800 works differently than the rest so it has its own func */
11353 	if (page == BM_WUC_PAGE) {
11354 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
11355 		goto release;
11356 	}
11357 
11358 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
11359 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
11360 		    && (sc->sc_type != WM_T_82583))
11361 			rv = wm_gmii_mdic_writereg(dev, phy,
11362 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11363 		else
11364 			rv = wm_gmii_mdic_writereg(dev, phy,
11365 			    BME1000_PHY_PAGE_SELECT, page);
11366 		if (rv != 0)
11367 			goto release;
11368 	}
11369 
11370 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
11371 
11372 release:
11373 	sc->phy.release(sc);
11374 	return rv;
11375 }
11376 
11377 /*
11378  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
11379  *  @dev: pointer to the HW structure
11380  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
11381  *
11382  *  Assumes semaphore already acquired and phy_reg points to a valid memory
11383  *  address to store contents of the BM_WUC_ENABLE_REG register.
11384  */
11385 static int
11386 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11387 {
11388 #ifdef WM_DEBUG
11389 	struct wm_softc *sc = device_private(dev);
11390 #endif
11391 	uint16_t temp;
11392 	int rv;
11393 
11394 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11395 		device_xname(dev), __func__));
11396 
11397 	if (!phy_regp)
11398 		return -1;
11399 
11400 	/* All page select, port ctrl and wakeup registers use phy address 1 */
11401 
11402 	/* Select Port Control Registers page */
11403 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11404 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11405 	if (rv != 0)
11406 		return rv;
11407 
11408 	/* Read WUCE and save it */
11409 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
11410 	if (rv != 0)
11411 		return rv;
11412 
11413 	/* Enable both PHY wakeup mode and Wakeup register page writes.
11414 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
11415 	 */
11416 	temp = *phy_regp;
11417 	temp |= BM_WUC_ENABLE_BIT;
11418 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
11419 
11420 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
11421 		return rv;
11422 
11423 	/* Select Host Wakeup Registers page - caller now able to write
11424 	 * registers on the Wakeup registers page
11425 	 */
11426 	return wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11427 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
11428 }
11429 
11430 /*
11431  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
11432  *  @dev: pointer to the HW structure
11433  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
11434  *
11435  *  Restore BM_WUC_ENABLE_REG to its original value.
11436  *
11437  *  Assumes semaphore already acquired and *phy_reg is the contents of the
11438  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
11439  *  caller.
11440  */
11441 static int
11442 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
11443 {
11444 #ifdef WM_DEBUG
11445 	struct wm_softc *sc = device_private(dev);
11446 #endif
11447 
11448 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
11449 		device_xname(dev), __func__));
11450 
11451 	if (!phy_regp)
11452 		return -1;
11453 
11454 	/* Select Port Control Registers page */
11455 	wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11456 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
11457 
11458 	/* Restore 769.17 to its original value */
11459 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
11460 
11461 	return 0;
11462 }
11463 
11464 /*
11465  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
11466  *  @sc: pointer to the HW structure
11467  *  @offset: register offset to be read or written
11468  *  @val: pointer to the data to read or write
11469  *  @rd: determines if operation is read or write
11470  *  @page_set: BM_WUC_PAGE already set and access enabled
11471  *
11472  *  Read the PHY register at offset and store the retrieved information in
11473  *  data, or write data to PHY register at offset.  Note the procedure to
11474  *  access the PHY wakeup registers is different than reading the other PHY
11475  *  registers. It works as such:
11476  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
11477  *  2) Set page to 800 for host (801 if we were manageability)
11478  *  3) Write the address using the address opcode (0x11)
11479  *  4) Read or write the data using the data opcode (0x12)
11480  *  5) Restore 769.17.2 to its original value
11481  *
11482  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
11483  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
11484  *
11485  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
11486  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
11487  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
11488  */
11489 static int
11490 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
11491 	bool page_set)
11492 {
11493 	struct wm_softc *sc = device_private(dev);
11494 	uint16_t regnum = BM_PHY_REG_NUM(offset);
11495 	uint16_t page = BM_PHY_REG_PAGE(offset);
11496 	uint16_t wuce;
11497 	int rv = 0;
11498 
11499 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11500 		device_xname(dev), __func__));
11501 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
11502 	if ((sc->sc_type == WM_T_PCH)
11503 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
11504 		device_printf(dev,
11505 		    "Attempting to access page %d while gig enabled.\n", page);
11506 	}
11507 
11508 	if (!page_set) {
11509 		/* Enable access to PHY wakeup registers */
11510 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
11511 		if (rv != 0) {
11512 			device_printf(dev,
11513 			    "%s: Could not enable PHY wakeup reg access\n",
11514 			    __func__);
11515 			return rv;
11516 		}
11517 	}
11518 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
11519 		device_xname(sc->sc_dev), __func__, page, regnum));
11520 
11521 	/*
11522 	 * 2) Access PHY wakeup register.
11523 	 * See wm_access_phy_wakeup_reg_bm.
11524 	 */
11525 
11526 	/* Write the Wakeup register page offset value using opcode 0x11 */
11527 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
11528 	if (rv != 0)
11529 		return rv;
11530 
11531 	if (rd) {
11532 		/* Read the Wakeup register page value using opcode 0x12 */
11533 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
11534 	} else {
11535 		/* Write the Wakeup register page value using opcode 0x12 */
11536 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
11537 	}
11538 	if (rv != 0)
11539 		return rv;
11540 
11541 	if (!page_set)
11542 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
11543 
11544 	return rv;
11545 }
11546 
11547 /*
11548  * wm_gmii_hv_readreg:	[mii interface function]
11549  *
11550  *	Read a PHY register on the kumeran
11551  * This could be handled by the PHY layer if we didn't have to lock the
11552  * resource ...
11553  */
11554 static int
11555 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
11556 {
11557 	struct wm_softc *sc = device_private(dev);
11558 	int rv;
11559 
11560 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11561 		device_xname(dev), __func__));
11562 	if (sc->phy.acquire(sc)) {
11563 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11564 		return -1;
11565 	}
11566 
11567 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
11568 	sc->phy.release(sc);
11569 	return rv;
11570 }
11571 
11572 static int
11573 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
11574 {
11575 	uint16_t page = BM_PHY_REG_PAGE(reg);
11576 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11577 	int rv;
11578 
11579 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11580 
11581 	/* Page 800 works differently than the rest so it has its own func */
11582 	if (page == BM_WUC_PAGE)
11583 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
11584 
11585 	/*
11586 	 * Lower than page 768 works differently than the rest so it has its
11587 	 * own func
11588 	 */
11589 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11590 		device_printf(dev, "gmii_hv_readreg!!!\n");
11591 		return -1;
11592 	}
11593 
11594 	/*
11595 	 * XXX I21[789] documents say that the SMBus Address register is at
11596 	 * PHY address 01, Page 0 (not 768), Register 26.
11597 	 */
11598 	if (page == HV_INTC_FC_PAGE_START)
11599 		page = 0;
11600 
11601 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11602 		rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT,
11603 		    page << BME1000_PAGE_SHIFT);
11604 		if (rv != 0)
11605 			return rv;
11606 	}
11607 
11608 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
11609 }
11610 
11611 /*
11612  * wm_gmii_hv_writereg:	[mii interface function]
11613  *
11614  *	Write a PHY register on the kumeran.
11615  * This could be handled by the PHY layer if we didn't have to lock the
11616  * resource ...
11617  */
11618 static int
11619 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
11620 {
11621 	struct wm_softc *sc = device_private(dev);
11622 	int rv;
11623 
11624 	DPRINTF(sc, WM_DEBUG_GMII, ("%s: %s called\n",
11625 		device_xname(dev), __func__));
11626 
11627 	if (sc->phy.acquire(sc)) {
11628 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11629 		return -1;
11630 	}
11631 
11632 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
11633 	sc->phy.release(sc);
11634 
11635 	return rv;
11636 }
11637 
11638 static int
11639 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
11640 {
11641 	struct wm_softc *sc = device_private(dev);
11642 	uint16_t page = BM_PHY_REG_PAGE(reg);
11643 	uint16_t regnum = BM_PHY_REG_NUM(reg);
11644 	int rv;
11645 
11646 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
11647 
11648 	/* Page 800 works differently than the rest so it has its own func */
11649 	if (page == BM_WUC_PAGE)
11650 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
11651 		    false);
11652 
11653 	/*
11654 	 * Lower than page 768 works differently than the rest so it has its
11655 	 * own func
11656 	 */
11657 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
11658 		device_printf(dev, "gmii_hv_writereg!!!\n");
11659 		return -1;
11660 	}
11661 
11662 	{
11663 		/*
11664 		 * XXX I21[789] documents say that the SMBus Address register
11665 		 * is at PHY address 01, Page 0 (not 768), Register 26.
11666 		 */
11667 		if (page == HV_INTC_FC_PAGE_START)
11668 			page = 0;
11669 
11670 		/*
11671 		 * XXX Workaround MDIO accesses being disabled after entering
11672 		 * IEEE Power Down (whenever bit 11 of the PHY control
11673 		 * register is set)
11674 		 */
11675 		if (sc->sc_phytype == WMPHY_82578) {
11676 			struct mii_softc *child;
11677 
11678 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11679 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
11680 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
11681 			    && ((val & (1 << 11)) != 0)) {
11682 				device_printf(dev, "XXX need workaround\n");
11683 			}
11684 		}
11685 
11686 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
11687 			rv = wm_gmii_mdic_writereg(dev, 1,
11688 			    IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
11689 			if (rv != 0)
11690 				return rv;
11691 		}
11692 	}
11693 
11694 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
11695 }
11696 
11697 /*
11698  * wm_gmii_82580_readreg:	[mii interface function]
11699  *
11700  *	Read a PHY register on the 82580 and I350.
11701  * This could be handled by the PHY layer if we didn't have to lock the
11702  * resource ...
11703  */
11704 static int
11705 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
11706 {
11707 	struct wm_softc *sc = device_private(dev);
11708 	int rv;
11709 
11710 	if (sc->phy.acquire(sc) != 0) {
11711 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11712 		return -1;
11713 	}
11714 
11715 #ifdef DIAGNOSTIC
11716 	if (reg > MII_ADDRMASK) {
11717 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11718 		    __func__, sc->sc_phytype, reg);
11719 		reg &= MII_ADDRMASK;
11720 	}
11721 #endif
11722 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
11723 
11724 	sc->phy.release(sc);
11725 	return rv;
11726 }
11727 
11728 /*
11729  * wm_gmii_82580_writereg:	[mii interface function]
11730  *
11731  *	Write a PHY register on the 82580 and I350.
11732  * This could be handled by the PHY layer if we didn't have to lock the
11733  * resource ...
11734  */
11735 static int
11736 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
11737 {
11738 	struct wm_softc *sc = device_private(dev);
11739 	int rv;
11740 
11741 	if (sc->phy.acquire(sc) != 0) {
11742 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11743 		return -1;
11744 	}
11745 
11746 #ifdef DIAGNOSTIC
11747 	if (reg > MII_ADDRMASK) {
11748 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
11749 		    __func__, sc->sc_phytype, reg);
11750 		reg &= MII_ADDRMASK;
11751 	}
11752 #endif
11753 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
11754 
11755 	sc->phy.release(sc);
11756 	return rv;
11757 }
11758 
11759 /*
11760  * wm_gmii_gs40g_readreg:	[mii interface function]
11761  *
11762  *	Read a PHY register on the I2100 and I211.
11763  * This could be handled by the PHY layer if we didn't have to lock the
11764  * resource ...
11765  */
11766 static int
11767 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
11768 {
11769 	struct wm_softc *sc = device_private(dev);
11770 	int page, offset;
11771 	int rv;
11772 
11773 	/* Acquire semaphore */
11774 	if (sc->phy.acquire(sc)) {
11775 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11776 		return -1;
11777 	}
11778 
11779 	/* Page select */
11780 	page = reg >> GS40G_PAGE_SHIFT;
11781 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11782 	if (rv != 0)
11783 		goto release;
11784 
11785 	/* Read reg */
11786 	offset = reg & GS40G_OFFSET_MASK;
11787 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
11788 
11789 release:
11790 	sc->phy.release(sc);
11791 	return rv;
11792 }
11793 
11794 /*
11795  * wm_gmii_gs40g_writereg:	[mii interface function]
11796  *
11797  *	Write a PHY register on the I210 and I211.
11798  * This could be handled by the PHY layer if we didn't have to lock the
11799  * resource ...
11800  */
11801 static int
11802 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
11803 {
11804 	struct wm_softc *sc = device_private(dev);
11805 	uint16_t page;
11806 	int offset, rv;
11807 
11808 	/* Acquire semaphore */
11809 	if (sc->phy.acquire(sc)) {
11810 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
11811 		return -1;
11812 	}
11813 
11814 	/* Page select */
11815 	page = reg >> GS40G_PAGE_SHIFT;
11816 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
11817 	if (rv != 0)
11818 		goto release;
11819 
11820 	/* Write reg */
11821 	offset = reg & GS40G_OFFSET_MASK;
11822 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
11823 
11824 release:
11825 	/* Release semaphore */
11826 	sc->phy.release(sc);
11827 	return rv;
11828 }
11829 
11830 /*
11831  * wm_gmii_statchg:	[mii interface function]
11832  *
11833  *	Callback from MII layer when media changes.
11834  */
11835 static void
11836 wm_gmii_statchg(struct ifnet *ifp)
11837 {
11838 	struct wm_softc *sc = ifp->if_softc;
11839 	struct mii_data *mii = &sc->sc_mii;
11840 
11841 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
11842 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11843 	sc->sc_fcrtl &= ~FCRTL_XONE;
11844 
11845 	/* Get flow control negotiation result. */
11846 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
11847 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
11848 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
11849 		mii->mii_media_active &= ~IFM_ETH_FMASK;
11850 	}
11851 
11852 	if (sc->sc_flowflags & IFM_FLOW) {
11853 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
11854 			sc->sc_ctrl |= CTRL_TFCE;
11855 			sc->sc_fcrtl |= FCRTL_XONE;
11856 		}
11857 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
11858 			sc->sc_ctrl |= CTRL_RFCE;
11859 	}
11860 
11861 	if (mii->mii_media_active & IFM_FDX) {
11862 		DPRINTF(sc, WM_DEBUG_LINK,
11863 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
11864 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11865 	} else {
11866 		DPRINTF(sc, WM_DEBUG_LINK,
11867 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
11868 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11869 	}
11870 
11871 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11872 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11873 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
11874 						 : WMREG_FCRTL, sc->sc_fcrtl);
11875 	if (sc->sc_type == WM_T_80003) {
11876 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
11877 		case IFM_1000_T:
11878 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11879 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
11880 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
11881 			break;
11882 		default:
11883 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
11884 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
11885 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
11886 			break;
11887 		}
11888 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
11889 	}
11890 }
11891 
11892 /* kumeran related (80003, ICH* and PCH*) */
11893 
11894 /*
11895  * wm_kmrn_readreg:
11896  *
11897  *	Read a kumeran register
11898  */
11899 static int
11900 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
11901 {
11902 	int rv;
11903 
11904 	if (sc->sc_type == WM_T_80003)
11905 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11906 	else
11907 		rv = sc->phy.acquire(sc);
11908 	if (rv != 0) {
11909 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11910 		    __func__);
11911 		return rv;
11912 	}
11913 
11914 	rv = wm_kmrn_readreg_locked(sc, reg, val);
11915 
11916 	if (sc->sc_type == WM_T_80003)
11917 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11918 	else
11919 		sc->phy.release(sc);
11920 
11921 	return rv;
11922 }
11923 
11924 static int
11925 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
11926 {
11927 
11928 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11929 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
11930 	    KUMCTRLSTA_REN);
11931 	CSR_WRITE_FLUSH(sc);
11932 	delay(2);
11933 
11934 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
11935 
11936 	return 0;
11937 }
11938 
11939 /*
11940  * wm_kmrn_writereg:
11941  *
11942  *	Write a kumeran register
11943  */
11944 static int
11945 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
11946 {
11947 	int rv;
11948 
11949 	if (sc->sc_type == WM_T_80003)
11950 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11951 	else
11952 		rv = sc->phy.acquire(sc);
11953 	if (rv != 0) {
11954 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
11955 		    __func__);
11956 		return rv;
11957 	}
11958 
11959 	rv = wm_kmrn_writereg_locked(sc, reg, val);
11960 
11961 	if (sc->sc_type == WM_T_80003)
11962 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
11963 	else
11964 		sc->phy.release(sc);
11965 
11966 	return rv;
11967 }
11968 
11969 static int
11970 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
11971 {
11972 
11973 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
11974 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
11975 
11976 	return 0;
11977 }
11978 
11979 /*
11980  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
11981  * This access method is different from IEEE MMD.
11982  */
11983 static int
11984 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
11985 {
11986 	struct wm_softc *sc = device_private(dev);
11987 	int rv;
11988 
11989 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
11990 	if (rv != 0)
11991 		return rv;
11992 
11993 	if (rd)
11994 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
11995 	else
11996 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
11997 	return rv;
11998 }
11999 
12000 static int
12001 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
12002 {
12003 
12004 	return wm_access_emi_reg_locked(dev, reg, val, true);
12005 }
12006 
12007 static int
12008 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
12009 {
12010 
12011 	return wm_access_emi_reg_locked(dev, reg, &val, false);
12012 }
12013 
12014 /* SGMII related */
12015 
12016 /*
12017  * wm_sgmii_uses_mdio
12018  *
12019  * Check whether the transaction is to the internal PHY or the external
12020  * MDIO interface. Return true if it's MDIO.
12021  */
12022 static bool
12023 wm_sgmii_uses_mdio(struct wm_softc *sc)
12024 {
12025 	uint32_t reg;
12026 	bool ismdio = false;
12027 
12028 	switch (sc->sc_type) {
12029 	case WM_T_82575:
12030 	case WM_T_82576:
12031 		reg = CSR_READ(sc, WMREG_MDIC);
12032 		ismdio = ((reg & MDIC_DEST) != 0);
12033 		break;
12034 	case WM_T_82580:
12035 	case WM_T_I350:
12036 	case WM_T_I354:
12037 	case WM_T_I210:
12038 	case WM_T_I211:
12039 		reg = CSR_READ(sc, WMREG_MDICNFG);
12040 		ismdio = ((reg & MDICNFG_DEST) != 0);
12041 		break;
12042 	default:
12043 		break;
12044 	}
12045 
12046 	return ismdio;
12047 }
12048 
12049 /* Setup internal SGMII PHY for SFP */
12050 static void
12051 wm_sgmii_sfp_preconfig(struct wm_softc *sc)
12052 {
12053 	uint16_t id1, id2, phyreg;
12054 	int i, rv;
12055 
12056 	if (((sc->sc_flags & WM_F_SGMII) == 0)
12057 	    || ((sc->sc_flags & WM_F_SFP) == 0))
12058 		return;
12059 
12060 	for (i = 0; i < MII_NPHY; i++) {
12061 		sc->phy.no_errprint = true;
12062 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR1, &id1);
12063 		if (rv != 0)
12064 			continue;
12065 		rv = sc->phy.readreg_locked(sc->sc_dev, i, MII_PHYIDR2, &id2);
12066 		if (rv != 0)
12067 			continue;
12068 		if (MII_OUI(id1, id2) != MII_OUI_xxMARVELL)
12069 			continue;
12070 		sc->phy.no_errprint = false;
12071 
12072 		sc->phy.readreg_locked(sc->sc_dev, i, MAKPHY_ESSR, &phyreg);
12073 		phyreg &= ~(ESSR_SER_ANEG_BYPASS | ESSR_HWCFG_MODE);
12074 		phyreg |= ESSR_SGMII_WOC_COPPER;
12075 		sc->phy.writereg_locked(sc->sc_dev, i, MAKPHY_ESSR, phyreg);
12076 		break;
12077 	}
12078 
12079 }
12080 
12081 /*
12082  * wm_sgmii_readreg:	[mii interface function]
12083  *
12084  *	Read a PHY register on the SGMII
12085  * This could be handled by the PHY layer if we didn't have to lock the
12086  * resource ...
12087  */
12088 static int
12089 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
12090 {
12091 	struct wm_softc *sc = device_private(dev);
12092 	int rv;
12093 
12094 	if (sc->phy.acquire(sc)) {
12095 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12096 		return -1;
12097 	}
12098 
12099 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
12100 
12101 	sc->phy.release(sc);
12102 	return rv;
12103 }
12104 
12105 static int
12106 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
12107 {
12108 	struct wm_softc *sc = device_private(dev);
12109 	uint32_t i2ccmd;
12110 	int i, rv = 0;
12111 
12112 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12113 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12114 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12115 
12116 	/* Poll the ready bit */
12117 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12118 		delay(50);
12119 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12120 		if (i2ccmd & I2CCMD_READY)
12121 			break;
12122 	}
12123 	if ((i2ccmd & I2CCMD_READY) == 0) {
12124 		device_printf(dev, "I2CCMD Read did not complete\n");
12125 		rv = ETIMEDOUT;
12126 	}
12127 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
12128 		if (!sc->phy.no_errprint)
12129 			device_printf(dev, "I2CCMD Error bit set\n");
12130 		rv = EIO;
12131 	}
12132 
12133 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
12134 
12135 	return rv;
12136 }
12137 
12138 /*
12139  * wm_sgmii_writereg:	[mii interface function]
12140  *
12141  *	Write a PHY register on the SGMII.
12142  * This could be handled by the PHY layer if we didn't have to lock the
12143  * resource ...
12144  */
12145 static int
12146 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
12147 {
12148 	struct wm_softc *sc = device_private(dev);
12149 	int rv;
12150 
12151 	if (sc->phy.acquire(sc) != 0) {
12152 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
12153 		return -1;
12154 	}
12155 
12156 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
12157 
12158 	sc->phy.release(sc);
12159 
12160 	return rv;
12161 }
12162 
12163 static int
12164 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
12165 {
12166 	struct wm_softc *sc = device_private(dev);
12167 	uint32_t i2ccmd;
12168 	uint16_t swapdata;
12169 	int rv = 0;
12170 	int i;
12171 
12172 	/* Swap the data bytes for the I2C interface */
12173 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
12174 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
12175 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
12176 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12177 
12178 	/* Poll the ready bit */
12179 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12180 		delay(50);
12181 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12182 		if (i2ccmd & I2CCMD_READY)
12183 			break;
12184 	}
12185 	if ((i2ccmd & I2CCMD_READY) == 0) {
12186 		device_printf(dev, "I2CCMD Write did not complete\n");
12187 		rv = ETIMEDOUT;
12188 	}
12189 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
12190 		device_printf(dev, "I2CCMD Error bit set\n");
12191 		rv = EIO;
12192 	}
12193 
12194 	return rv;
12195 }
12196 
12197 /* TBI related */
12198 
12199 static bool
12200 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
12201 {
12202 	bool sig;
12203 
12204 	sig = ctrl & CTRL_SWDPIN(1);
12205 
12206 	/*
12207 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
12208 	 * detect a signal, 1 if they don't.
12209 	 */
12210 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
12211 		sig = !sig;
12212 
12213 	return sig;
12214 }
12215 
12216 /*
12217  * wm_tbi_mediainit:
12218  *
12219  *	Initialize media for use on 1000BASE-X devices.
12220  */
12221 static void
12222 wm_tbi_mediainit(struct wm_softc *sc)
12223 {
12224 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12225 	const char *sep = "";
12226 
12227 	if (sc->sc_type < WM_T_82543)
12228 		sc->sc_tipg = TIPG_WM_DFLT;
12229 	else
12230 		sc->sc_tipg = TIPG_LG_DFLT;
12231 
12232 	sc->sc_tbi_serdes_anegticks = 5;
12233 
12234 	/* Initialize our media structures */
12235 	sc->sc_mii.mii_ifp = ifp;
12236 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
12237 
12238 	ifp->if_baudrate = IF_Gbps(1);
12239 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
12240 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12241 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12242 		    wm_serdes_mediachange, wm_serdes_mediastatus,
12243 		    sc->sc_core_lock);
12244 	} else {
12245 		ifmedia_init_with_lock(&sc->sc_mii.mii_media, IFM_IMASK,
12246 		    wm_tbi_mediachange, wm_tbi_mediastatus, sc->sc_core_lock);
12247 	}
12248 
12249 	/*
12250 	 * SWD Pins:
12251 	 *
12252 	 *	0 = Link LED (output)
12253 	 *	1 = Loss Of Signal (input)
12254 	 */
12255 	sc->sc_ctrl |= CTRL_SWDPIO(0);
12256 
12257 	/* XXX Perhaps this is only for TBI */
12258 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12259 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
12260 
12261 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
12262 		sc->sc_ctrl &= ~CTRL_LRST;
12263 
12264 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12265 
12266 #define	ADD(ss, mm, dd)							\
12267 do {									\
12268 	aprint_normal("%s%s", sep, ss);					\
12269 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
12270 	sep = ", ";							\
12271 } while (/*CONSTCOND*/0)
12272 
12273 	aprint_normal_dev(sc->sc_dev, "");
12274 
12275 	if (sc->sc_type == WM_T_I354) {
12276 		uint32_t status;
12277 
12278 		status = CSR_READ(sc, WMREG_STATUS);
12279 		if (((status & STATUS_2P5_SKU) != 0)
12280 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12281 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
12282 		} else
12283 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
12284 	} else if (sc->sc_type == WM_T_82545) {
12285 		/* Only 82545 is LX (XXX except SFP) */
12286 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12287 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12288 	} else if (sc->sc_sfptype != 0) {
12289 		/* XXX wm(4) fiber/serdes don't use ifm_data */
12290 		switch (sc->sc_sfptype) {
12291 		default:
12292 		case SFF_SFP_ETH_FLAGS_1000SX:
12293 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12294 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12295 			break;
12296 		case SFF_SFP_ETH_FLAGS_1000LX:
12297 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
12298 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
12299 			break;
12300 		case SFF_SFP_ETH_FLAGS_1000CX:
12301 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
12302 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
12303 			break;
12304 		case SFF_SFP_ETH_FLAGS_1000T:
12305 			ADD("1000baseT", IFM_1000_T, 0);
12306 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
12307 			break;
12308 		case SFF_SFP_ETH_FLAGS_100FX:
12309 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
12310 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
12311 			break;
12312 		}
12313 	} else {
12314 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
12315 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
12316 	}
12317 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
12318 	aprint_normal("\n");
12319 
12320 #undef ADD
12321 
12322 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
12323 }
12324 
12325 /*
12326  * wm_tbi_mediachange:	[ifmedia interface function]
12327  *
12328  *	Set hardware to newly-selected media on a 1000BASE-X device.
12329  */
12330 static int
12331 wm_tbi_mediachange(struct ifnet *ifp)
12332 {
12333 	struct wm_softc *sc = ifp->if_softc;
12334 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12335 	uint32_t status, ctrl;
12336 	bool signal;
12337 	int i;
12338 
12339 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
12340 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12341 		/* XXX need some work for >= 82571 and < 82575 */
12342 		if (sc->sc_type < WM_T_82575)
12343 			return 0;
12344 	}
12345 
12346 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12347 	    || (sc->sc_type >= WM_T_82575))
12348 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12349 
12350 	sc->sc_ctrl &= ~CTRL_LRST;
12351 	sc->sc_txcw = TXCW_ANE;
12352 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12353 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
12354 	else if (ife->ifm_media & IFM_FDX)
12355 		sc->sc_txcw |= TXCW_FD;
12356 	else
12357 		sc->sc_txcw |= TXCW_HD;
12358 
12359 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
12360 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
12361 
12362 	DPRINTF(sc, WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
12363 		device_xname(sc->sc_dev), sc->sc_txcw));
12364 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12365 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12366 	CSR_WRITE_FLUSH(sc);
12367 	delay(1000);
12368 
12369 	ctrl = CSR_READ(sc, WMREG_CTRL);
12370 	signal = wm_tbi_havesignal(sc, ctrl);
12371 
12372 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
12373 		signal));
12374 
12375 	if (signal) {
12376 		/* Have signal; wait for the link to come up. */
12377 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
12378 			delay(10000);
12379 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
12380 				break;
12381 		}
12382 
12383 		DPRINTF(sc, WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
12384 			device_xname(sc->sc_dev), i));
12385 
12386 		status = CSR_READ(sc, WMREG_STATUS);
12387 		DPRINTF(sc, WM_DEBUG_LINK,
12388 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
12389 			device_xname(sc->sc_dev), status, STATUS_LU));
12390 		if (status & STATUS_LU) {
12391 			/* Link is up. */
12392 			DPRINTF(sc, WM_DEBUG_LINK,
12393 			    ("%s: LINK: set media -> link up %s\n",
12394 				device_xname(sc->sc_dev),
12395 				(status & STATUS_FD) ? "FDX" : "HDX"));
12396 
12397 			/*
12398 			 * NOTE: CTRL will update TFCE and RFCE automatically,
12399 			 * so we should update sc->sc_ctrl
12400 			 */
12401 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
12402 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
12403 			sc->sc_fcrtl &= ~FCRTL_XONE;
12404 			if (status & STATUS_FD)
12405 				sc->sc_tctl |=
12406 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
12407 			else
12408 				sc->sc_tctl |=
12409 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
12410 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
12411 				sc->sc_fcrtl |= FCRTL_XONE;
12412 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
12413 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
12414 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
12415 			sc->sc_tbi_linkup = 1;
12416 		} else {
12417 			if (i == WM_LINKUP_TIMEOUT)
12418 				wm_check_for_link(sc);
12419 			/* Link is down. */
12420 			DPRINTF(sc, WM_DEBUG_LINK,
12421 			    ("%s: LINK: set media -> link down\n",
12422 				device_xname(sc->sc_dev)));
12423 			sc->sc_tbi_linkup = 0;
12424 		}
12425 	} else {
12426 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
12427 			device_xname(sc->sc_dev)));
12428 		sc->sc_tbi_linkup = 0;
12429 	}
12430 
12431 	wm_tbi_serdes_set_linkled(sc);
12432 
12433 	return 0;
12434 }
12435 
12436 /*
12437  * wm_tbi_mediastatus:	[ifmedia interface function]
12438  *
12439  *	Get the current interface media status on a 1000BASE-X device.
12440  */
12441 static void
12442 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12443 {
12444 	struct wm_softc *sc = ifp->if_softc;
12445 	uint32_t ctrl, status;
12446 
12447 	ifmr->ifm_status = IFM_AVALID;
12448 	ifmr->ifm_active = IFM_ETHER;
12449 
12450 	status = CSR_READ(sc, WMREG_STATUS);
12451 	if ((status & STATUS_LU) == 0) {
12452 		ifmr->ifm_active |= IFM_NONE;
12453 		return;
12454 	}
12455 
12456 	ifmr->ifm_status |= IFM_ACTIVE;
12457 	/* Only 82545 is LX */
12458 	if (sc->sc_type == WM_T_82545)
12459 		ifmr->ifm_active |= IFM_1000_LX;
12460 	else
12461 		ifmr->ifm_active |= IFM_1000_SX;
12462 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
12463 		ifmr->ifm_active |= IFM_FDX;
12464 	else
12465 		ifmr->ifm_active |= IFM_HDX;
12466 	ctrl = CSR_READ(sc, WMREG_CTRL);
12467 	if (ctrl & CTRL_RFCE)
12468 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
12469 	if (ctrl & CTRL_TFCE)
12470 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
12471 }
12472 
12473 /* XXX TBI only */
12474 static int
12475 wm_check_for_link(struct wm_softc *sc)
12476 {
12477 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
12478 	uint32_t rxcw;
12479 	uint32_t ctrl;
12480 	uint32_t status;
12481 	bool signal;
12482 
12483 	DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s called\n",
12484 		device_xname(sc->sc_dev), __func__));
12485 
12486 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
12487 		/* XXX need some work for >= 82571 */
12488 		if (sc->sc_type >= WM_T_82571) {
12489 			sc->sc_tbi_linkup = 1;
12490 			return 0;
12491 		}
12492 	}
12493 
12494 	rxcw = CSR_READ(sc, WMREG_RXCW);
12495 	ctrl = CSR_READ(sc, WMREG_CTRL);
12496 	status = CSR_READ(sc, WMREG_STATUS);
12497 	signal = wm_tbi_havesignal(sc, ctrl);
12498 
12499 	DPRINTF(sc, WM_DEBUG_LINK,
12500 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
12501 		device_xname(sc->sc_dev), __func__, signal,
12502 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
12503 
12504 	/*
12505 	 * SWDPIN   LU RXCW
12506 	 *	0    0	  0
12507 	 *	0    0	  1	(should not happen)
12508 	 *	0    1	  0	(should not happen)
12509 	 *	0    1	  1	(should not happen)
12510 	 *	1    0	  0	Disable autonego and force linkup
12511 	 *	1    0	  1	got /C/ but not linkup yet
12512 	 *	1    1	  0	(linkup)
12513 	 *	1    1	  1	If IFM_AUTO, back to autonego
12514 	 *
12515 	 */
12516 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
12517 		DPRINTF(sc, WM_DEBUG_LINK,
12518 		    ("%s: %s: force linkup and fullduplex\n",
12519 			device_xname(sc->sc_dev), __func__));
12520 		sc->sc_tbi_linkup = 0;
12521 		/* Disable auto-negotiation in the TXCW register */
12522 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
12523 
12524 		/*
12525 		 * Force link-up and also force full-duplex.
12526 		 *
12527 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
12528 		 * so we should update sc->sc_ctrl
12529 		 */
12530 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
12531 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12532 	} else if (((status & STATUS_LU) != 0)
12533 	    && ((rxcw & RXCW_C) != 0)
12534 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
12535 		sc->sc_tbi_linkup = 1;
12536 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
12537 			device_xname(sc->sc_dev),
12538 			__func__));
12539 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12540 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
12541 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
12542 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: /C/",
12543 			device_xname(sc->sc_dev), __func__));
12544 	} else {
12545 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
12546 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
12547 			status));
12548 	}
12549 
12550 	return 0;
12551 }
12552 
12553 /*
12554  * wm_tbi_tick:
12555  *
12556  *	Check the link on TBI devices.
12557  *	This function acts as mii_tick().
12558  */
12559 static void
12560 wm_tbi_tick(struct wm_softc *sc)
12561 {
12562 	struct mii_data *mii = &sc->sc_mii;
12563 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12564 	uint32_t status;
12565 
12566 	KASSERT(WM_CORE_LOCKED(sc));
12567 
12568 	status = CSR_READ(sc, WMREG_STATUS);
12569 
12570 	/* XXX is this needed? */
12571 	(void)CSR_READ(sc, WMREG_RXCW);
12572 	(void)CSR_READ(sc, WMREG_CTRL);
12573 
12574 	/* set link status */
12575 	if ((status & STATUS_LU) == 0) {
12576 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
12577 			device_xname(sc->sc_dev)));
12578 		sc->sc_tbi_linkup = 0;
12579 	} else if (sc->sc_tbi_linkup == 0) {
12580 		DPRINTF(sc, WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
12581 			device_xname(sc->sc_dev),
12582 			(status & STATUS_FD) ? "FDX" : "HDX"));
12583 		sc->sc_tbi_linkup = 1;
12584 		sc->sc_tbi_serdes_ticks = 0;
12585 	}
12586 
12587 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
12588 		goto setled;
12589 
12590 	if ((status & STATUS_LU) == 0) {
12591 		sc->sc_tbi_linkup = 0;
12592 		/* If the timer expired, retry autonegotiation */
12593 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12594 		    && (++sc->sc_tbi_serdes_ticks
12595 			>= sc->sc_tbi_serdes_anegticks)) {
12596 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12597 				device_xname(sc->sc_dev), __func__));
12598 			sc->sc_tbi_serdes_ticks = 0;
12599 			/*
12600 			 * Reset the link, and let autonegotiation do
12601 			 * its thing
12602 			 */
12603 			sc->sc_ctrl |= CTRL_LRST;
12604 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12605 			CSR_WRITE_FLUSH(sc);
12606 			delay(1000);
12607 			sc->sc_ctrl &= ~CTRL_LRST;
12608 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12609 			CSR_WRITE_FLUSH(sc);
12610 			delay(1000);
12611 			CSR_WRITE(sc, WMREG_TXCW,
12612 			    sc->sc_txcw & ~TXCW_ANE);
12613 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
12614 		}
12615 	}
12616 
12617 setled:
12618 	wm_tbi_serdes_set_linkled(sc);
12619 }
12620 
12621 /* SERDES related */
12622 static void
12623 wm_serdes_power_up_link_82575(struct wm_softc *sc)
12624 {
12625 	uint32_t reg;
12626 
12627 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12628 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12629 		return;
12630 
12631 	/* Enable PCS to turn on link */
12632 	reg = CSR_READ(sc, WMREG_PCS_CFG);
12633 	reg |= PCS_CFG_PCS_EN;
12634 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
12635 
12636 	/* Power up the laser */
12637 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12638 	reg &= ~CTRL_EXT_SWDPIN(3);
12639 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12640 
12641 	/* Flush the write to verify completion */
12642 	CSR_WRITE_FLUSH(sc);
12643 	delay(1000);
12644 }
12645 
12646 static int
12647 wm_serdes_mediachange(struct ifnet *ifp)
12648 {
12649 	struct wm_softc *sc = ifp->if_softc;
12650 	bool pcs_autoneg = true; /* XXX */
12651 	uint32_t ctrl_ext, pcs_lctl, reg;
12652 
12653 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
12654 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
12655 		return 0;
12656 
12657 	/* XXX Currently, this function is not called on 8257[12] */
12658 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
12659 	    || (sc->sc_type >= WM_T_82575))
12660 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
12661 
12662 	/* Power on the sfp cage if present */
12663 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12664 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12665 	ctrl_ext |= CTRL_EXT_I2C_ENA;
12666 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12667 
12668 	sc->sc_ctrl |= CTRL_SLU;
12669 
12670 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
12671 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
12672 
12673 		reg = CSR_READ(sc, WMREG_CONNSW);
12674 		reg |= CONNSW_ENRGSRC;
12675 		CSR_WRITE(sc, WMREG_CONNSW, reg);
12676 	}
12677 
12678 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
12679 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
12680 	case CTRL_EXT_LINK_MODE_SGMII:
12681 		/* SGMII mode lets the phy handle forcing speed/duplex */
12682 		pcs_autoneg = true;
12683 		/* Autoneg time out should be disabled for SGMII mode */
12684 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
12685 		break;
12686 	case CTRL_EXT_LINK_MODE_1000KX:
12687 		pcs_autoneg = false;
12688 		/* FALLTHROUGH */
12689 	default:
12690 		if ((sc->sc_type == WM_T_82575)
12691 		    || (sc->sc_type == WM_T_82576)) {
12692 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
12693 				pcs_autoneg = false;
12694 		}
12695 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
12696 		    | CTRL_FRCFDX;
12697 
12698 		/* Set speed of 1000/Full if speed/duplex is forced */
12699 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
12700 	}
12701 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
12702 
12703 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
12704 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
12705 
12706 	if (pcs_autoneg) {
12707 		/* Set PCS register for autoneg */
12708 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
12709 
12710 		/* Disable force flow control for autoneg */
12711 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
12712 
12713 		/* Configure flow control advertisement for autoneg */
12714 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
12715 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
12716 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
12717 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
12718 	} else
12719 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
12720 
12721 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
12722 
12723 	return 0;
12724 }
12725 
12726 static void
12727 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
12728 {
12729 	struct wm_softc *sc = ifp->if_softc;
12730 	struct mii_data *mii = &sc->sc_mii;
12731 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12732 	uint32_t pcs_adv, pcs_lpab, reg;
12733 
12734 	ifmr->ifm_status = IFM_AVALID;
12735 	ifmr->ifm_active = IFM_ETHER;
12736 
12737 	/* Check PCS */
12738 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12739 	if ((reg & PCS_LSTS_LINKOK) == 0) {
12740 		ifmr->ifm_active |= IFM_NONE;
12741 		sc->sc_tbi_linkup = 0;
12742 		goto setled;
12743 	}
12744 
12745 	sc->sc_tbi_linkup = 1;
12746 	ifmr->ifm_status |= IFM_ACTIVE;
12747 	if (sc->sc_type == WM_T_I354) {
12748 		uint32_t status;
12749 
12750 		status = CSR_READ(sc, WMREG_STATUS);
12751 		if (((status & STATUS_2P5_SKU) != 0)
12752 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
12753 			ifmr->ifm_active |= IFM_2500_KX;
12754 		} else
12755 			ifmr->ifm_active |= IFM_1000_KX;
12756 	} else {
12757 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
12758 		case PCS_LSTS_SPEED_10:
12759 			ifmr->ifm_active |= IFM_10_T; /* XXX */
12760 			break;
12761 		case PCS_LSTS_SPEED_100:
12762 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
12763 			break;
12764 		case PCS_LSTS_SPEED_1000:
12765 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12766 			break;
12767 		default:
12768 			device_printf(sc->sc_dev, "Unknown speed\n");
12769 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
12770 			break;
12771 		}
12772 	}
12773 	ifp->if_baudrate = ifmedia_baudrate(ifmr->ifm_active);
12774 	if ((reg & PCS_LSTS_FDX) != 0)
12775 		ifmr->ifm_active |= IFM_FDX;
12776 	else
12777 		ifmr->ifm_active |= IFM_HDX;
12778 	mii->mii_media_active &= ~IFM_ETH_FMASK;
12779 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
12780 		/* Check flow */
12781 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
12782 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
12783 			DPRINTF(sc, WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
12784 			goto setled;
12785 		}
12786 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
12787 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
12788 		DPRINTF(sc, WM_DEBUG_LINK,
12789 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
12790 		if ((pcs_adv & TXCW_SYM_PAUSE)
12791 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
12792 			mii->mii_media_active |= IFM_FLOW
12793 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
12794 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
12795 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12796 		    && (pcs_lpab & TXCW_SYM_PAUSE)
12797 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12798 			mii->mii_media_active |= IFM_FLOW
12799 			    | IFM_ETH_TXPAUSE;
12800 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
12801 		    && (pcs_adv & TXCW_ASYM_PAUSE)
12802 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
12803 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
12804 			mii->mii_media_active |= IFM_FLOW
12805 			    | IFM_ETH_RXPAUSE;
12806 		}
12807 	}
12808 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
12809 	    | (mii->mii_media_active & IFM_ETH_FMASK);
12810 setled:
12811 	wm_tbi_serdes_set_linkled(sc);
12812 }
12813 
12814 /*
12815  * wm_serdes_tick:
12816  *
12817  *	Check the link on serdes devices.
12818  */
12819 static void
12820 wm_serdes_tick(struct wm_softc *sc)
12821 {
12822 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
12823 	struct mii_data *mii = &sc->sc_mii;
12824 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
12825 	uint32_t reg;
12826 
12827 	KASSERT(WM_CORE_LOCKED(sc));
12828 
12829 	mii->mii_media_status = IFM_AVALID;
12830 	mii->mii_media_active = IFM_ETHER;
12831 
12832 	/* Check PCS */
12833 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
12834 	if ((reg & PCS_LSTS_LINKOK) != 0) {
12835 		mii->mii_media_status |= IFM_ACTIVE;
12836 		sc->sc_tbi_linkup = 1;
12837 		sc->sc_tbi_serdes_ticks = 0;
12838 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
12839 		if ((reg & PCS_LSTS_FDX) != 0)
12840 			mii->mii_media_active |= IFM_FDX;
12841 		else
12842 			mii->mii_media_active |= IFM_HDX;
12843 	} else {
12844 		mii->mii_media_status |= IFM_NONE;
12845 		sc->sc_tbi_linkup = 0;
12846 		/* If the timer expired, retry autonegotiation */
12847 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
12848 		    && (++sc->sc_tbi_serdes_ticks
12849 			>= sc->sc_tbi_serdes_anegticks)) {
12850 			DPRINTF(sc, WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
12851 				device_xname(sc->sc_dev), __func__));
12852 			sc->sc_tbi_serdes_ticks = 0;
12853 			/* XXX */
12854 			wm_serdes_mediachange(ifp);
12855 		}
12856 	}
12857 
12858 	wm_tbi_serdes_set_linkled(sc);
12859 }
12860 
12861 /* SFP related */
12862 
12863 static int
12864 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
12865 {
12866 	uint32_t i2ccmd;
12867 	int i;
12868 
12869 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
12870 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
12871 
12872 	/* Poll the ready bit */
12873 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
12874 		delay(50);
12875 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
12876 		if (i2ccmd & I2CCMD_READY)
12877 			break;
12878 	}
12879 	if ((i2ccmd & I2CCMD_READY) == 0)
12880 		return -1;
12881 	if ((i2ccmd & I2CCMD_ERROR) != 0)
12882 		return -1;
12883 
12884 	*data = i2ccmd & 0x00ff;
12885 
12886 	return 0;
12887 }
12888 
12889 static uint32_t
12890 wm_sfp_get_media_type(struct wm_softc *sc)
12891 {
12892 	uint32_t ctrl_ext;
12893 	uint8_t val = 0;
12894 	int timeout = 3;
12895 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
12896 	int rv = -1;
12897 
12898 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12899 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
12900 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
12901 	CSR_WRITE_FLUSH(sc);
12902 
12903 	/* Read SFP module data */
12904 	while (timeout) {
12905 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
12906 		if (rv == 0)
12907 			break;
12908 		delay(100*1000); /* XXX too big */
12909 		timeout--;
12910 	}
12911 	if (rv != 0)
12912 		goto out;
12913 
12914 	switch (val) {
12915 	case SFF_SFP_ID_SFF:
12916 		aprint_normal_dev(sc->sc_dev,
12917 		    "Module/Connector soldered to board\n");
12918 		break;
12919 	case SFF_SFP_ID_SFP:
12920 		sc->sc_flags |= WM_F_SFP;
12921 		break;
12922 	case SFF_SFP_ID_UNKNOWN:
12923 		goto out;
12924 	default:
12925 		break;
12926 	}
12927 
12928 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
12929 	if (rv != 0)
12930 		goto out;
12931 
12932 	sc->sc_sfptype = val;
12933 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
12934 		mediatype = WM_MEDIATYPE_SERDES;
12935 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
12936 		sc->sc_flags |= WM_F_SGMII;
12937 		mediatype = WM_MEDIATYPE_COPPER;
12938 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
12939 		sc->sc_flags |= WM_F_SGMII;
12940 		mediatype = WM_MEDIATYPE_SERDES;
12941 	} else {
12942 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
12943 		    __func__, sc->sc_sfptype);
12944 		sc->sc_sfptype = 0; /* XXX unknown */
12945 	}
12946 
12947 out:
12948 	/* Restore I2C interface setting */
12949 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12950 
12951 	return mediatype;
12952 }
12953 
12954 /*
12955  * NVM related.
12956  * Microwire, SPI (w/wo EERD) and Flash.
12957  */
12958 
12959 /* Both spi and uwire */
12960 
12961 /*
12962  * wm_eeprom_sendbits:
12963  *
12964  *	Send a series of bits to the EEPROM.
12965  */
12966 static void
12967 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
12968 {
12969 	uint32_t reg;
12970 	int x;
12971 
12972 	reg = CSR_READ(sc, WMREG_EECD);
12973 
12974 	for (x = nbits; x > 0; x--) {
12975 		if (bits & (1U << (x - 1)))
12976 			reg |= EECD_DI;
12977 		else
12978 			reg &= ~EECD_DI;
12979 		CSR_WRITE(sc, WMREG_EECD, reg);
12980 		CSR_WRITE_FLUSH(sc);
12981 		delay(2);
12982 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
12983 		CSR_WRITE_FLUSH(sc);
12984 		delay(2);
12985 		CSR_WRITE(sc, WMREG_EECD, reg);
12986 		CSR_WRITE_FLUSH(sc);
12987 		delay(2);
12988 	}
12989 }
12990 
12991 /*
12992  * wm_eeprom_recvbits:
12993  *
12994  *	Receive a series of bits from the EEPROM.
12995  */
12996 static void
12997 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
12998 {
12999 	uint32_t reg, val;
13000 	int x;
13001 
13002 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
13003 
13004 	val = 0;
13005 	for (x = nbits; x > 0; x--) {
13006 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
13007 		CSR_WRITE_FLUSH(sc);
13008 		delay(2);
13009 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
13010 			val |= (1U << (x - 1));
13011 		CSR_WRITE(sc, WMREG_EECD, reg);
13012 		CSR_WRITE_FLUSH(sc);
13013 		delay(2);
13014 	}
13015 	*valp = val;
13016 }
13017 
13018 /* Microwire */
13019 
13020 /*
13021  * wm_nvm_read_uwire:
13022  *
13023  *	Read a word from the EEPROM using the MicroWire protocol.
13024  */
13025 static int
13026 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13027 {
13028 	uint32_t reg, val;
13029 	int i;
13030 
13031 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13032 		device_xname(sc->sc_dev), __func__));
13033 
13034 	if (sc->nvm.acquire(sc) != 0)
13035 		return -1;
13036 
13037 	for (i = 0; i < wordcnt; i++) {
13038 		/* Clear SK and DI. */
13039 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
13040 		CSR_WRITE(sc, WMREG_EECD, reg);
13041 
13042 		/*
13043 		 * XXX: workaround for a bug in qemu-0.12.x and prior
13044 		 * and Xen.
13045 		 *
13046 		 * We use this workaround only for 82540 because qemu's
13047 		 * e1000 act as 82540.
13048 		 */
13049 		if (sc->sc_type == WM_T_82540) {
13050 			reg |= EECD_SK;
13051 			CSR_WRITE(sc, WMREG_EECD, reg);
13052 			reg &= ~EECD_SK;
13053 			CSR_WRITE(sc, WMREG_EECD, reg);
13054 			CSR_WRITE_FLUSH(sc);
13055 			delay(2);
13056 		}
13057 		/* XXX: end of workaround */
13058 
13059 		/* Set CHIP SELECT. */
13060 		reg |= EECD_CS;
13061 		CSR_WRITE(sc, WMREG_EECD, reg);
13062 		CSR_WRITE_FLUSH(sc);
13063 		delay(2);
13064 
13065 		/* Shift in the READ command. */
13066 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
13067 
13068 		/* Shift in address. */
13069 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
13070 
13071 		/* Shift out the data. */
13072 		wm_eeprom_recvbits(sc, &val, 16);
13073 		data[i] = val & 0xffff;
13074 
13075 		/* Clear CHIP SELECT. */
13076 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
13077 		CSR_WRITE(sc, WMREG_EECD, reg);
13078 		CSR_WRITE_FLUSH(sc);
13079 		delay(2);
13080 	}
13081 
13082 	sc->nvm.release(sc);
13083 	return 0;
13084 }
13085 
13086 /* SPI */
13087 
13088 /*
13089  * Set SPI and FLASH related information from the EECD register.
13090  * For 82541 and 82547, the word size is taken from EEPROM.
13091  */
13092 static int
13093 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
13094 {
13095 	int size;
13096 	uint32_t reg;
13097 	uint16_t data;
13098 
13099 	reg = CSR_READ(sc, WMREG_EECD);
13100 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
13101 
13102 	/* Read the size of NVM from EECD by default */
13103 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13104 	switch (sc->sc_type) {
13105 	case WM_T_82541:
13106 	case WM_T_82541_2:
13107 	case WM_T_82547:
13108 	case WM_T_82547_2:
13109 		/* Set dummy value to access EEPROM */
13110 		sc->sc_nvm_wordsize = 64;
13111 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
13112 			aprint_error_dev(sc->sc_dev,
13113 			    "%s: failed to read EEPROM size\n", __func__);
13114 		}
13115 		reg = data;
13116 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
13117 		if (size == 0)
13118 			size = 6; /* 64 word size */
13119 		else
13120 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
13121 		break;
13122 	case WM_T_80003:
13123 	case WM_T_82571:
13124 	case WM_T_82572:
13125 	case WM_T_82573: /* SPI case */
13126 	case WM_T_82574: /* SPI case */
13127 	case WM_T_82583: /* SPI case */
13128 		size += NVM_WORD_SIZE_BASE_SHIFT;
13129 		if (size > 14)
13130 			size = 14;
13131 		break;
13132 	case WM_T_82575:
13133 	case WM_T_82576:
13134 	case WM_T_82580:
13135 	case WM_T_I350:
13136 	case WM_T_I354:
13137 	case WM_T_I210:
13138 	case WM_T_I211:
13139 		size += NVM_WORD_SIZE_BASE_SHIFT;
13140 		if (size > 15)
13141 			size = 15;
13142 		break;
13143 	default:
13144 		aprint_error_dev(sc->sc_dev,
13145 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
13146 		return -1;
13147 		break;
13148 	}
13149 
13150 	sc->sc_nvm_wordsize = 1 << size;
13151 
13152 	return 0;
13153 }
13154 
13155 /*
13156  * wm_nvm_ready_spi:
13157  *
13158  *	Wait for a SPI EEPROM to be ready for commands.
13159  */
13160 static int
13161 wm_nvm_ready_spi(struct wm_softc *sc)
13162 {
13163 	uint32_t val;
13164 	int usec;
13165 
13166 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13167 		device_xname(sc->sc_dev), __func__));
13168 
13169 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
13170 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
13171 		wm_eeprom_recvbits(sc, &val, 8);
13172 		if ((val & SPI_SR_RDY) == 0)
13173 			break;
13174 	}
13175 	if (usec >= SPI_MAX_RETRIES) {
13176 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
13177 		return -1;
13178 	}
13179 	return 0;
13180 }
13181 
13182 /*
13183  * wm_nvm_read_spi:
13184  *
13185  *	Read a work from the EEPROM using the SPI protocol.
13186  */
13187 static int
13188 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
13189 {
13190 	uint32_t reg, val;
13191 	int i;
13192 	uint8_t opc;
13193 	int rv = 0;
13194 
13195 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13196 		device_xname(sc->sc_dev), __func__));
13197 
13198 	if (sc->nvm.acquire(sc) != 0)
13199 		return -1;
13200 
13201 	/* Clear SK and CS. */
13202 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
13203 	CSR_WRITE(sc, WMREG_EECD, reg);
13204 	CSR_WRITE_FLUSH(sc);
13205 	delay(2);
13206 
13207 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
13208 		goto out;
13209 
13210 	/* Toggle CS to flush commands. */
13211 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
13212 	CSR_WRITE_FLUSH(sc);
13213 	delay(2);
13214 	CSR_WRITE(sc, WMREG_EECD, reg);
13215 	CSR_WRITE_FLUSH(sc);
13216 	delay(2);
13217 
13218 	opc = SPI_OPC_READ;
13219 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
13220 		opc |= SPI_OPC_A8;
13221 
13222 	wm_eeprom_sendbits(sc, opc, 8);
13223 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
13224 
13225 	for (i = 0; i < wordcnt; i++) {
13226 		wm_eeprom_recvbits(sc, &val, 16);
13227 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
13228 	}
13229 
13230 	/* Raise CS and clear SK. */
13231 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
13232 	CSR_WRITE(sc, WMREG_EECD, reg);
13233 	CSR_WRITE_FLUSH(sc);
13234 	delay(2);
13235 
13236 out:
13237 	sc->nvm.release(sc);
13238 	return rv;
13239 }
13240 
13241 /* Using with EERD */
13242 
13243 static int
13244 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
13245 {
13246 	uint32_t attempts = 100000;
13247 	uint32_t i, reg = 0;
13248 	int32_t done = -1;
13249 
13250 	for (i = 0; i < attempts; i++) {
13251 		reg = CSR_READ(sc, rw);
13252 
13253 		if (reg & EERD_DONE) {
13254 			done = 0;
13255 			break;
13256 		}
13257 		delay(5);
13258 	}
13259 
13260 	return done;
13261 }
13262 
13263 static int
13264 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
13265 {
13266 	int i, eerd = 0;
13267 	int rv = 0;
13268 
13269 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13270 		device_xname(sc->sc_dev), __func__));
13271 
13272 	if (sc->nvm.acquire(sc) != 0)
13273 		return -1;
13274 
13275 	for (i = 0; i < wordcnt; i++) {
13276 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
13277 		CSR_WRITE(sc, WMREG_EERD, eerd);
13278 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
13279 		if (rv != 0) {
13280 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
13281 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
13282 			break;
13283 		}
13284 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
13285 	}
13286 
13287 	sc->nvm.release(sc);
13288 	return rv;
13289 }
13290 
13291 /* Flash */
13292 
13293 static int
13294 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
13295 {
13296 	uint32_t eecd;
13297 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
13298 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
13299 	uint32_t nvm_dword = 0;
13300 	uint8_t sig_byte = 0;
13301 	int rv;
13302 
13303 	switch (sc->sc_type) {
13304 	case WM_T_PCH_SPT:
13305 	case WM_T_PCH_CNP:
13306 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
13307 		act_offset = ICH_NVM_SIG_WORD * 2;
13308 
13309 		/* Set bank to 0 in case flash read fails. */
13310 		*bank = 0;
13311 
13312 		/* Check bank 0 */
13313 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
13314 		if (rv != 0)
13315 			return rv;
13316 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13317 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13318 			*bank = 0;
13319 			return 0;
13320 		}
13321 
13322 		/* Check bank 1 */
13323 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
13324 		    &nvm_dword);
13325 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
13326 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13327 			*bank = 1;
13328 			return 0;
13329 		}
13330 		aprint_error_dev(sc->sc_dev,
13331 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
13332 		return -1;
13333 	case WM_T_ICH8:
13334 	case WM_T_ICH9:
13335 		eecd = CSR_READ(sc, WMREG_EECD);
13336 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
13337 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
13338 			return 0;
13339 		}
13340 		/* FALLTHROUGH */
13341 	default:
13342 		/* Default to 0 */
13343 		*bank = 0;
13344 
13345 		/* Check bank 0 */
13346 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
13347 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13348 			*bank = 0;
13349 			return 0;
13350 		}
13351 
13352 		/* Check bank 1 */
13353 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
13354 		    &sig_byte);
13355 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
13356 			*bank = 1;
13357 			return 0;
13358 		}
13359 	}
13360 
13361 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
13362 		device_xname(sc->sc_dev)));
13363 	return -1;
13364 }
13365 
13366 /******************************************************************************
13367  * This function does initial flash setup so that a new read/write/erase cycle
13368  * can be started.
13369  *
13370  * sc - The pointer to the hw structure
13371  ****************************************************************************/
13372 static int32_t
13373 wm_ich8_cycle_init(struct wm_softc *sc)
13374 {
13375 	uint16_t hsfsts;
13376 	int32_t error = 1;
13377 	int32_t i     = 0;
13378 
13379 	if (sc->sc_type >= WM_T_PCH_SPT)
13380 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
13381 	else
13382 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13383 
13384 	/* May be check the Flash Des Valid bit in Hw status */
13385 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
13386 		return error;
13387 
13388 	/* Clear FCERR in Hw status by writing 1 */
13389 	/* Clear DAEL in Hw status by writing a 1 */
13390 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
13391 
13392 	if (sc->sc_type >= WM_T_PCH_SPT)
13393 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
13394 	else
13395 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13396 
13397 	/*
13398 	 * Either we should have a hardware SPI cycle in progress bit to check
13399 	 * against, in order to start a new cycle or FDONE bit should be
13400 	 * changed in the hardware so that it is 1 after hardware reset, which
13401 	 * can then be used as an indication whether a cycle is in progress or
13402 	 * has been completed .. we should also have some software semaphore
13403 	 * mechanism to guard FDONE or the cycle in progress bit so that two
13404 	 * threads access to those bits can be sequentiallized or a way so that
13405 	 * 2 threads don't start the cycle at the same time
13406 	 */
13407 
13408 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13409 		/*
13410 		 * There is no cycle running at present, so we can start a
13411 		 * cycle
13412 		 */
13413 
13414 		/* Begin by setting Flash Cycle Done. */
13415 		hsfsts |= HSFSTS_DONE;
13416 		if (sc->sc_type >= WM_T_PCH_SPT)
13417 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13418 			    hsfsts & 0xffffUL);
13419 		else
13420 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
13421 		error = 0;
13422 	} else {
13423 		/*
13424 		 * Otherwise poll for sometime so the current cycle has a
13425 		 * chance to end before giving up.
13426 		 */
13427 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
13428 			if (sc->sc_type >= WM_T_PCH_SPT)
13429 				hsfsts = ICH8_FLASH_READ32(sc,
13430 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13431 			else
13432 				hsfsts = ICH8_FLASH_READ16(sc,
13433 				    ICH_FLASH_HSFSTS);
13434 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
13435 				error = 0;
13436 				break;
13437 			}
13438 			delay(1);
13439 		}
13440 		if (error == 0) {
13441 			/*
13442 			 * Successful in waiting for previous cycle to timeout,
13443 			 * now set the Flash Cycle Done.
13444 			 */
13445 			hsfsts |= HSFSTS_DONE;
13446 			if (sc->sc_type >= WM_T_PCH_SPT)
13447 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13448 				    hsfsts & 0xffffUL);
13449 			else
13450 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
13451 				    hsfsts);
13452 		}
13453 	}
13454 	return error;
13455 }
13456 
13457 /******************************************************************************
13458  * This function starts a flash cycle and waits for its completion
13459  *
13460  * sc - The pointer to the hw structure
13461  ****************************************************************************/
13462 static int32_t
13463 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
13464 {
13465 	uint16_t hsflctl;
13466 	uint16_t hsfsts;
13467 	int32_t error = 1;
13468 	uint32_t i = 0;
13469 
13470 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
13471 	if (sc->sc_type >= WM_T_PCH_SPT)
13472 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
13473 	else
13474 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13475 	hsflctl |= HSFCTL_GO;
13476 	if (sc->sc_type >= WM_T_PCH_SPT)
13477 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13478 		    (uint32_t)hsflctl << 16);
13479 	else
13480 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13481 
13482 	/* Wait till FDONE bit is set to 1 */
13483 	do {
13484 		if (sc->sc_type >= WM_T_PCH_SPT)
13485 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13486 			    & 0xffffUL;
13487 		else
13488 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
13489 		if (hsfsts & HSFSTS_DONE)
13490 			break;
13491 		delay(1);
13492 		i++;
13493 	} while (i < timeout);
13494 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
13495 		error = 0;
13496 
13497 	return error;
13498 }
13499 
13500 /******************************************************************************
13501  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
13502  *
13503  * sc - The pointer to the hw structure
13504  * index - The index of the byte or word to read.
13505  * size - Size of data to read, 1=byte 2=word, 4=dword
13506  * data - Pointer to the word to store the value read.
13507  *****************************************************************************/
13508 static int32_t
13509 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
13510     uint32_t size, uint32_t *data)
13511 {
13512 	uint16_t hsfsts;
13513 	uint16_t hsflctl;
13514 	uint32_t flash_linear_address;
13515 	uint32_t flash_data = 0;
13516 	int32_t error = 1;
13517 	int32_t count = 0;
13518 
13519 	if (size < 1  || size > 4 || data == 0x0 ||
13520 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
13521 		return error;
13522 
13523 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
13524 	    sc->sc_ich8_flash_base;
13525 
13526 	do {
13527 		delay(1);
13528 		/* Steps */
13529 		error = wm_ich8_cycle_init(sc);
13530 		if (error)
13531 			break;
13532 
13533 		if (sc->sc_type >= WM_T_PCH_SPT)
13534 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
13535 			    >> 16;
13536 		else
13537 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
13538 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
13539 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
13540 		    & HSFCTL_BCOUNT_MASK;
13541 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
13542 		if (sc->sc_type >= WM_T_PCH_SPT) {
13543 			/*
13544 			 * In SPT, This register is in Lan memory space, not
13545 			 * flash. Therefore, only 32 bit access is supported.
13546 			 */
13547 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
13548 			    (uint32_t)hsflctl << 16);
13549 		} else
13550 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
13551 
13552 		/*
13553 		 * Write the last 24 bits of index into Flash Linear address
13554 		 * field in Flash Address
13555 		 */
13556 		/* TODO: TBD maybe check the index against the size of flash */
13557 
13558 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
13559 
13560 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
13561 
13562 		/*
13563 		 * Check if FCERR is set to 1, if set to 1, clear it and try
13564 		 * the whole sequence a few more times, else read in (shift in)
13565 		 * the Flash Data0, the order is least significant byte first
13566 		 * msb to lsb
13567 		 */
13568 		if (error == 0) {
13569 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
13570 			if (size == 1)
13571 				*data = (uint8_t)(flash_data & 0x000000FF);
13572 			else if (size == 2)
13573 				*data = (uint16_t)(flash_data & 0x0000FFFF);
13574 			else if (size == 4)
13575 				*data = (uint32_t)flash_data;
13576 			break;
13577 		} else {
13578 			/*
13579 			 * If we've gotten here, then things are probably
13580 			 * completely hosed, but if the error condition is
13581 			 * detected, it won't hurt to give it another try...
13582 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
13583 			 */
13584 			if (sc->sc_type >= WM_T_PCH_SPT)
13585 				hsfsts = ICH8_FLASH_READ32(sc,
13586 				    ICH_FLASH_HSFSTS) & 0xffffUL;
13587 			else
13588 				hsfsts = ICH8_FLASH_READ16(sc,
13589 				    ICH_FLASH_HSFSTS);
13590 
13591 			if (hsfsts & HSFSTS_ERR) {
13592 				/* Repeat for some time before giving up. */
13593 				continue;
13594 			} else if ((hsfsts & HSFSTS_DONE) == 0)
13595 				break;
13596 		}
13597 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
13598 
13599 	return error;
13600 }
13601 
13602 /******************************************************************************
13603  * Reads a single byte from the NVM using the ICH8 flash access registers.
13604  *
13605  * sc - pointer to wm_hw structure
13606  * index - The index of the byte to read.
13607  * data - Pointer to a byte to store the value read.
13608  *****************************************************************************/
13609 static int32_t
13610 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
13611 {
13612 	int32_t status;
13613 	uint32_t word = 0;
13614 
13615 	status = wm_read_ich8_data(sc, index, 1, &word);
13616 	if (status == 0)
13617 		*data = (uint8_t)word;
13618 	else
13619 		*data = 0;
13620 
13621 	return status;
13622 }
13623 
13624 /******************************************************************************
13625  * Reads a word from the NVM using the ICH8 flash access registers.
13626  *
13627  * sc - pointer to wm_hw structure
13628  * index - The starting byte index of the word to read.
13629  * data - Pointer to a word to store the value read.
13630  *****************************************************************************/
13631 static int32_t
13632 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
13633 {
13634 	int32_t status;
13635 	uint32_t word = 0;
13636 
13637 	status = wm_read_ich8_data(sc, index, 2, &word);
13638 	if (status == 0)
13639 		*data = (uint16_t)word;
13640 	else
13641 		*data = 0;
13642 
13643 	return status;
13644 }
13645 
13646 /******************************************************************************
13647  * Reads a dword from the NVM using the ICH8 flash access registers.
13648  *
13649  * sc - pointer to wm_hw structure
13650  * index - The starting byte index of the word to read.
13651  * data - Pointer to a word to store the value read.
13652  *****************************************************************************/
13653 static int32_t
13654 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
13655 {
13656 	int32_t status;
13657 
13658 	status = wm_read_ich8_data(sc, index, 4, data);
13659 	return status;
13660 }
13661 
13662 /******************************************************************************
13663  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
13664  * register.
13665  *
13666  * sc - Struct containing variables accessed by shared code
13667  * offset - offset of word in the EEPROM to read
13668  * data - word read from the EEPROM
13669  * words - number of words to read
13670  *****************************************************************************/
13671 static int
13672 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
13673 {
13674 	int32_t	 rv = 0;
13675 	uint32_t flash_bank = 0;
13676 	uint32_t act_offset = 0;
13677 	uint32_t bank_offset = 0;
13678 	uint16_t word = 0;
13679 	uint16_t i = 0;
13680 
13681 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13682 		device_xname(sc->sc_dev), __func__));
13683 
13684 	if (sc->nvm.acquire(sc) != 0)
13685 		return -1;
13686 
13687 	/*
13688 	 * We need to know which is the valid flash bank.  In the event
13689 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13690 	 * managing flash_bank. So it cannot be trusted and needs
13691 	 * to be updated with each read.
13692 	 */
13693 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13694 	if (rv) {
13695 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13696 			device_xname(sc->sc_dev)));
13697 		flash_bank = 0;
13698 	}
13699 
13700 	/*
13701 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13702 	 * size
13703 	 */
13704 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13705 
13706 	for (i = 0; i < words; i++) {
13707 		/* The NVM part needs a byte offset, hence * 2 */
13708 		act_offset = bank_offset + ((offset + i) * 2);
13709 		rv = wm_read_ich8_word(sc, act_offset, &word);
13710 		if (rv) {
13711 			aprint_error_dev(sc->sc_dev,
13712 			    "%s: failed to read NVM\n", __func__);
13713 			break;
13714 		}
13715 		data[i] = word;
13716 	}
13717 
13718 	sc->nvm.release(sc);
13719 	return rv;
13720 }
13721 
13722 /******************************************************************************
13723  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
13724  * register.
13725  *
13726  * sc - Struct containing variables accessed by shared code
13727  * offset - offset of word in the EEPROM to read
13728  * data - word read from the EEPROM
13729  * words - number of words to read
13730  *****************************************************************************/
13731 static int
13732 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
13733 {
13734 	int32_t	 rv = 0;
13735 	uint32_t flash_bank = 0;
13736 	uint32_t act_offset = 0;
13737 	uint32_t bank_offset = 0;
13738 	uint32_t dword = 0;
13739 	uint16_t i = 0;
13740 
13741 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13742 		device_xname(sc->sc_dev), __func__));
13743 
13744 	if (sc->nvm.acquire(sc) != 0)
13745 		return -1;
13746 
13747 	/*
13748 	 * We need to know which is the valid flash bank.  In the event
13749 	 * that we didn't allocate eeprom_shadow_ram, we may not be
13750 	 * managing flash_bank. So it cannot be trusted and needs
13751 	 * to be updated with each read.
13752 	 */
13753 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
13754 	if (rv) {
13755 		DPRINTF(sc, WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
13756 			device_xname(sc->sc_dev)));
13757 		flash_bank = 0;
13758 	}
13759 
13760 	/*
13761 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
13762 	 * size
13763 	 */
13764 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
13765 
13766 	for (i = 0; i < words; i++) {
13767 		/* The NVM part needs a byte offset, hence * 2 */
13768 		act_offset = bank_offset + ((offset + i) * 2);
13769 		/* but we must read dword aligned, so mask ... */
13770 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
13771 		if (rv) {
13772 			aprint_error_dev(sc->sc_dev,
13773 			    "%s: failed to read NVM\n", __func__);
13774 			break;
13775 		}
13776 		/* ... and pick out low or high word */
13777 		if ((act_offset & 0x2) == 0)
13778 			data[i] = (uint16_t)(dword & 0xFFFF);
13779 		else
13780 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
13781 	}
13782 
13783 	sc->nvm.release(sc);
13784 	return rv;
13785 }
13786 
13787 /* iNVM */
13788 
13789 static int
13790 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
13791 {
13792 	int32_t	 rv = 0;
13793 	uint32_t invm_dword;
13794 	uint16_t i;
13795 	uint8_t record_type, word_address;
13796 
13797 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13798 		device_xname(sc->sc_dev), __func__));
13799 
13800 	for (i = 0; i < INVM_SIZE; i++) {
13801 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
13802 		/* Get record type */
13803 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
13804 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
13805 			break;
13806 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
13807 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
13808 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
13809 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
13810 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
13811 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
13812 			if (word_address == address) {
13813 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
13814 				rv = 0;
13815 				break;
13816 			}
13817 		}
13818 	}
13819 
13820 	return rv;
13821 }
13822 
13823 static int
13824 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
13825 {
13826 	int rv = 0;
13827 	int i;
13828 
13829 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
13830 		device_xname(sc->sc_dev), __func__));
13831 
13832 	if (sc->nvm.acquire(sc) != 0)
13833 		return -1;
13834 
13835 	for (i = 0; i < words; i++) {
13836 		switch (offset + i) {
13837 		case NVM_OFF_MACADDR:
13838 		case NVM_OFF_MACADDR1:
13839 		case NVM_OFF_MACADDR2:
13840 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
13841 			if (rv != 0) {
13842 				data[i] = 0xffff;
13843 				rv = -1;
13844 			}
13845 			break;
13846 		case NVM_OFF_CFG1: /* == INVM_AUTOLOAD */
13847 			rv = wm_nvm_read_word_invm(sc, offset, data);
13848 			if (rv != 0) {
13849 				*data = INVM_DEFAULT_AL;
13850 				rv = 0;
13851 			}
13852 			break;
13853 		case NVM_OFF_CFG2:
13854 			rv = wm_nvm_read_word_invm(sc, offset, data);
13855 			if (rv != 0) {
13856 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
13857 				rv = 0;
13858 			}
13859 			break;
13860 		case NVM_OFF_CFG4:
13861 			rv = wm_nvm_read_word_invm(sc, offset, data);
13862 			if (rv != 0) {
13863 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
13864 				rv = 0;
13865 			}
13866 			break;
13867 		case NVM_OFF_LED_1_CFG:
13868 			rv = wm_nvm_read_word_invm(sc, offset, data);
13869 			if (rv != 0) {
13870 				*data = NVM_LED_1_CFG_DEFAULT_I211;
13871 				rv = 0;
13872 			}
13873 			break;
13874 		case NVM_OFF_LED_0_2_CFG:
13875 			rv = wm_nvm_read_word_invm(sc, offset, data);
13876 			if (rv != 0) {
13877 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
13878 				rv = 0;
13879 			}
13880 			break;
13881 		case NVM_OFF_ID_LED_SETTINGS:
13882 			rv = wm_nvm_read_word_invm(sc, offset, data);
13883 			if (rv != 0) {
13884 				*data = ID_LED_RESERVED_FFFF;
13885 				rv = 0;
13886 			}
13887 			break;
13888 		default:
13889 			DPRINTF(sc, WM_DEBUG_NVM,
13890 			    ("NVM word 0x%02x is not mapped.\n", offset));
13891 			*data = NVM_RESERVED_WORD;
13892 			break;
13893 		}
13894 	}
13895 
13896 	sc->nvm.release(sc);
13897 	return rv;
13898 }
13899 
13900 /* Lock, detecting NVM type, validate checksum, version and read */
13901 
13902 static int
13903 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
13904 {
13905 	uint32_t eecd = 0;
13906 
13907 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
13908 	    || sc->sc_type == WM_T_82583) {
13909 		eecd = CSR_READ(sc, WMREG_EECD);
13910 
13911 		/* Isolate bits 15 & 16 */
13912 		eecd = ((eecd >> 15) & 0x03);
13913 
13914 		/* If both bits are set, device is Flash type */
13915 		if (eecd == 0x03)
13916 			return 0;
13917 	}
13918 	return 1;
13919 }
13920 
13921 static int
13922 wm_nvm_flash_presence_i210(struct wm_softc *sc)
13923 {
13924 	uint32_t eec;
13925 
13926 	eec = CSR_READ(sc, WMREG_EEC);
13927 	if ((eec & EEC_FLASH_DETECTED) != 0)
13928 		return 1;
13929 
13930 	return 0;
13931 }
13932 
13933 /*
13934  * wm_nvm_validate_checksum
13935  *
13936  * The checksum is defined as the sum of the first 64 (16 bit) words.
13937  */
13938 static int
13939 wm_nvm_validate_checksum(struct wm_softc *sc)
13940 {
13941 	uint16_t checksum;
13942 	uint16_t eeprom_data;
13943 #ifdef WM_DEBUG
13944 	uint16_t csum_wordaddr, valid_checksum;
13945 #endif
13946 	int i;
13947 
13948 	checksum = 0;
13949 
13950 	/* Don't check for I211 */
13951 	if (sc->sc_type == WM_T_I211)
13952 		return 0;
13953 
13954 #ifdef WM_DEBUG
13955 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
13956 	    || (sc->sc_type == WM_T_PCH_CNP)) {
13957 		csum_wordaddr = NVM_OFF_COMPAT;
13958 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
13959 	} else {
13960 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
13961 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
13962 	}
13963 
13964 	/* Dump EEPROM image for debug */
13965 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
13966 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
13967 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
13968 		/* XXX PCH_SPT? */
13969 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
13970 		if ((eeprom_data & valid_checksum) == 0)
13971 			DPRINTF(sc, WM_DEBUG_NVM,
13972 			    ("%s: NVM need to be updated (%04x != %04x)\n",
13973 				device_xname(sc->sc_dev), eeprom_data,
13974 				    valid_checksum));
13975 	}
13976 
13977 	if ((sc->sc_debug & WM_DEBUG_NVM) != 0) {
13978 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
13979 		for (i = 0; i < NVM_SIZE; i++) {
13980 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
13981 				printf("XXXX ");
13982 			else
13983 				printf("%04hx ", eeprom_data);
13984 			if (i % 8 == 7)
13985 				printf("\n");
13986 		}
13987 	}
13988 
13989 #endif /* WM_DEBUG */
13990 
13991 	for (i = 0; i < NVM_SIZE; i++) {
13992 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
13993 			return 1;
13994 		checksum += eeprom_data;
13995 	}
13996 
13997 	if (checksum != (uint16_t) NVM_CHECKSUM) {
13998 #ifdef WM_DEBUG
13999 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
14000 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
14001 #endif
14002 	}
14003 
14004 	return 0;
14005 }
14006 
14007 static void
14008 wm_nvm_version_invm(struct wm_softc *sc)
14009 {
14010 	uint32_t dword;
14011 
14012 	/*
14013 	 * Linux's code to decode version is very strange, so we don't
14014 	 * obey that algorithm and just use word 61 as the document.
14015 	 * Perhaps it's not perfect though...
14016 	 *
14017 	 * Example:
14018 	 *
14019 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
14020 	 */
14021 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
14022 	dword = __SHIFTOUT(dword, INVM_VER_1);
14023 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
14024 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
14025 }
14026 
14027 static void
14028 wm_nvm_version(struct wm_softc *sc)
14029 {
14030 	uint16_t major, minor, build, patch;
14031 	uint16_t uid0, uid1;
14032 	uint16_t nvm_data;
14033 	uint16_t off;
14034 	bool check_version = false;
14035 	bool check_optionrom = false;
14036 	bool have_build = false;
14037 	bool have_uid = true;
14038 
14039 	/*
14040 	 * Version format:
14041 	 *
14042 	 * XYYZ
14043 	 * X0YZ
14044 	 * X0YY
14045 	 *
14046 	 * Example:
14047 	 *
14048 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
14049 	 *	82571	0x50a6	5.10.6?
14050 	 *	82572	0x506a	5.6.10?
14051 	 *	82572EI	0x5069	5.6.9?
14052 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
14053 	 *		0x2013	2.1.3?
14054 	 *	82583	0x10a0	1.10.0? (document says it's default value)
14055 	 * ICH8+82567	0x0040	0.4.0?
14056 	 * ICH9+82566	0x1040	1.4.0?
14057 	 *ICH10+82567	0x0043	0.4.3?
14058 	 *  PCH+82577	0x00c1	0.12.1?
14059 	 * PCH2+82579	0x00d3	0.13.3?
14060 	 *		0x00d4	0.13.4?
14061 	 *  LPT+I218	0x0023	0.2.3?
14062 	 *  SPT+I219	0x0084	0.8.4?
14063 	 *  CNP+I219	0x0054	0.5.4?
14064 	 */
14065 
14066 	/*
14067 	 * XXX
14068 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
14069 	 * I've never seen on real 82574 hardware with such small SPI ROM.
14070 	 */
14071 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
14072 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
14073 		have_uid = false;
14074 
14075 	switch (sc->sc_type) {
14076 	case WM_T_82571:
14077 	case WM_T_82572:
14078 	case WM_T_82574:
14079 	case WM_T_82583:
14080 		check_version = true;
14081 		check_optionrom = true;
14082 		have_build = true;
14083 		break;
14084 	case WM_T_ICH8:
14085 	case WM_T_ICH9:
14086 	case WM_T_ICH10:
14087 	case WM_T_PCH:
14088 	case WM_T_PCH2:
14089 	case WM_T_PCH_LPT:
14090 	case WM_T_PCH_SPT:
14091 	case WM_T_PCH_CNP:
14092 		check_version = true;
14093 		have_build = true;
14094 		have_uid = false;
14095 		break;
14096 	case WM_T_82575:
14097 	case WM_T_82576:
14098 	case WM_T_82580:
14099 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
14100 			check_version = true;
14101 		break;
14102 	case WM_T_I211:
14103 		wm_nvm_version_invm(sc);
14104 		have_uid = false;
14105 		goto printver;
14106 	case WM_T_I210:
14107 		if (!wm_nvm_flash_presence_i210(sc)) {
14108 			wm_nvm_version_invm(sc);
14109 			have_uid = false;
14110 			goto printver;
14111 		}
14112 		/* FALLTHROUGH */
14113 	case WM_T_I350:
14114 	case WM_T_I354:
14115 		check_version = true;
14116 		check_optionrom = true;
14117 		break;
14118 	default:
14119 		return;
14120 	}
14121 	if (check_version
14122 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
14123 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
14124 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
14125 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
14126 			build = nvm_data & NVM_BUILD_MASK;
14127 			have_build = true;
14128 		} else
14129 			minor = nvm_data & 0x00ff;
14130 
14131 		/* Decimal */
14132 		minor = (minor / 16) * 10 + (minor % 16);
14133 		sc->sc_nvm_ver_major = major;
14134 		sc->sc_nvm_ver_minor = minor;
14135 
14136 printver:
14137 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
14138 		    sc->sc_nvm_ver_minor);
14139 		if (have_build) {
14140 			sc->sc_nvm_ver_build = build;
14141 			aprint_verbose(".%d", build);
14142 		}
14143 	}
14144 
14145 	/* Assume the Option ROM area is at avove NVM_SIZE */
14146 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
14147 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
14148 		/* Option ROM Version */
14149 		if ((off != 0x0000) && (off != 0xffff)) {
14150 			int rv;
14151 
14152 			off += NVM_COMBO_VER_OFF;
14153 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
14154 			rv |= wm_nvm_read(sc, off, 1, &uid0);
14155 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
14156 			    && (uid1 != 0) && (uid1 != 0xffff)) {
14157 				/* 16bits */
14158 				major = uid0 >> 8;
14159 				build = (uid0 << 8) | (uid1 >> 8);
14160 				patch = uid1 & 0x00ff;
14161 				aprint_verbose(", option ROM Version %d.%d.%d",
14162 				    major, build, patch);
14163 			}
14164 		}
14165 	}
14166 
14167 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
14168 		aprint_verbose(", Image Unique ID %08x",
14169 		    ((uint32_t)uid1 << 16) | uid0);
14170 }
14171 
14172 /*
14173  * wm_nvm_read:
14174  *
14175  *	Read data from the serial EEPROM.
14176  */
14177 static int
14178 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
14179 {
14180 	int rv;
14181 
14182 	DPRINTF(sc, WM_DEBUG_NVM, ("%s: %s called\n",
14183 		device_xname(sc->sc_dev), __func__));
14184 
14185 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
14186 		return -1;
14187 
14188 	rv = sc->nvm.read(sc, word, wordcnt, data);
14189 
14190 	return rv;
14191 }
14192 
14193 /*
14194  * Hardware semaphores.
14195  * Very complexed...
14196  */
14197 
14198 static int
14199 wm_get_null(struct wm_softc *sc)
14200 {
14201 
14202 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14203 		device_xname(sc->sc_dev), __func__));
14204 	return 0;
14205 }
14206 
14207 static void
14208 wm_put_null(struct wm_softc *sc)
14209 {
14210 
14211 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14212 		device_xname(sc->sc_dev), __func__));
14213 	return;
14214 }
14215 
14216 static int
14217 wm_get_eecd(struct wm_softc *sc)
14218 {
14219 	uint32_t reg;
14220 	int x;
14221 
14222 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14223 		device_xname(sc->sc_dev), __func__));
14224 
14225 	reg = CSR_READ(sc, WMREG_EECD);
14226 
14227 	/* Request EEPROM access. */
14228 	reg |= EECD_EE_REQ;
14229 	CSR_WRITE(sc, WMREG_EECD, reg);
14230 
14231 	/* ..and wait for it to be granted. */
14232 	for (x = 0; x < 1000; x++) {
14233 		reg = CSR_READ(sc, WMREG_EECD);
14234 		if (reg & EECD_EE_GNT)
14235 			break;
14236 		delay(5);
14237 	}
14238 	if ((reg & EECD_EE_GNT) == 0) {
14239 		aprint_error_dev(sc->sc_dev,
14240 		    "could not acquire EEPROM GNT\n");
14241 		reg &= ~EECD_EE_REQ;
14242 		CSR_WRITE(sc, WMREG_EECD, reg);
14243 		return -1;
14244 	}
14245 
14246 	return 0;
14247 }
14248 
14249 static void
14250 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
14251 {
14252 
14253 	*eecd |= EECD_SK;
14254 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14255 	CSR_WRITE_FLUSH(sc);
14256 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14257 		delay(1);
14258 	else
14259 		delay(50);
14260 }
14261 
14262 static void
14263 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
14264 {
14265 
14266 	*eecd &= ~EECD_SK;
14267 	CSR_WRITE(sc, WMREG_EECD, *eecd);
14268 	CSR_WRITE_FLUSH(sc);
14269 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
14270 		delay(1);
14271 	else
14272 		delay(50);
14273 }
14274 
14275 static void
14276 wm_put_eecd(struct wm_softc *sc)
14277 {
14278 	uint32_t reg;
14279 
14280 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14281 		device_xname(sc->sc_dev), __func__));
14282 
14283 	/* Stop nvm */
14284 	reg = CSR_READ(sc, WMREG_EECD);
14285 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
14286 		/* Pull CS high */
14287 		reg |= EECD_CS;
14288 		wm_nvm_eec_clock_lower(sc, &reg);
14289 	} else {
14290 		/* CS on Microwire is active-high */
14291 		reg &= ~(EECD_CS | EECD_DI);
14292 		CSR_WRITE(sc, WMREG_EECD, reg);
14293 		wm_nvm_eec_clock_raise(sc, &reg);
14294 		wm_nvm_eec_clock_lower(sc, &reg);
14295 	}
14296 
14297 	reg = CSR_READ(sc, WMREG_EECD);
14298 	reg &= ~EECD_EE_REQ;
14299 	CSR_WRITE(sc, WMREG_EECD, reg);
14300 
14301 	return;
14302 }
14303 
14304 /*
14305  * Get hardware semaphore.
14306  * Same as e1000_get_hw_semaphore_generic()
14307  */
14308 static int
14309 wm_get_swsm_semaphore(struct wm_softc *sc)
14310 {
14311 	int32_t timeout;
14312 	uint32_t swsm;
14313 
14314 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14315 		device_xname(sc->sc_dev), __func__));
14316 	KASSERT(sc->sc_nvm_wordsize > 0);
14317 
14318 retry:
14319 	/* Get the SW semaphore. */
14320 	timeout = sc->sc_nvm_wordsize + 1;
14321 	while (timeout) {
14322 		swsm = CSR_READ(sc, WMREG_SWSM);
14323 
14324 		if ((swsm & SWSM_SMBI) == 0)
14325 			break;
14326 
14327 		delay(50);
14328 		timeout--;
14329 	}
14330 
14331 	if (timeout == 0) {
14332 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
14333 			/*
14334 			 * In rare circumstances, the SW semaphore may already
14335 			 * be held unintentionally. Clear the semaphore once
14336 			 * before giving up.
14337 			 */
14338 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
14339 			wm_put_swsm_semaphore(sc);
14340 			goto retry;
14341 		}
14342 		aprint_error_dev(sc->sc_dev,
14343 		    "could not acquire SWSM SMBI\n");
14344 		return 1;
14345 	}
14346 
14347 	/* Get the FW semaphore. */
14348 	timeout = sc->sc_nvm_wordsize + 1;
14349 	while (timeout) {
14350 		swsm = CSR_READ(sc, WMREG_SWSM);
14351 		swsm |= SWSM_SWESMBI;
14352 		CSR_WRITE(sc, WMREG_SWSM, swsm);
14353 		/* If we managed to set the bit we got the semaphore. */
14354 		swsm = CSR_READ(sc, WMREG_SWSM);
14355 		if (swsm & SWSM_SWESMBI)
14356 			break;
14357 
14358 		delay(50);
14359 		timeout--;
14360 	}
14361 
14362 	if (timeout == 0) {
14363 		aprint_error_dev(sc->sc_dev,
14364 		    "could not acquire SWSM SWESMBI\n");
14365 		/* Release semaphores */
14366 		wm_put_swsm_semaphore(sc);
14367 		return 1;
14368 	}
14369 	return 0;
14370 }
14371 
14372 /*
14373  * Put hardware semaphore.
14374  * Same as e1000_put_hw_semaphore_generic()
14375  */
14376 static void
14377 wm_put_swsm_semaphore(struct wm_softc *sc)
14378 {
14379 	uint32_t swsm;
14380 
14381 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14382 		device_xname(sc->sc_dev), __func__));
14383 
14384 	swsm = CSR_READ(sc, WMREG_SWSM);
14385 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
14386 	CSR_WRITE(sc, WMREG_SWSM, swsm);
14387 }
14388 
14389 /*
14390  * Get SW/FW semaphore.
14391  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
14392  */
14393 static int
14394 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14395 {
14396 	uint32_t swfw_sync;
14397 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
14398 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
14399 	int timeout;
14400 
14401 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14402 		device_xname(sc->sc_dev), __func__));
14403 
14404 	if (sc->sc_type == WM_T_80003)
14405 		timeout = 50;
14406 	else
14407 		timeout = 200;
14408 
14409 	while (timeout) {
14410 		if (wm_get_swsm_semaphore(sc)) {
14411 			aprint_error_dev(sc->sc_dev,
14412 			    "%s: failed to get semaphore\n",
14413 			    __func__);
14414 			return 1;
14415 		}
14416 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14417 		if ((swfw_sync & (swmask | fwmask)) == 0) {
14418 			swfw_sync |= swmask;
14419 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14420 			wm_put_swsm_semaphore(sc);
14421 			return 0;
14422 		}
14423 		wm_put_swsm_semaphore(sc);
14424 		delay(5000);
14425 		timeout--;
14426 	}
14427 	device_printf(sc->sc_dev,
14428 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
14429 	    mask, swfw_sync);
14430 	return 1;
14431 }
14432 
14433 static void
14434 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
14435 {
14436 	uint32_t swfw_sync;
14437 
14438 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14439 		device_xname(sc->sc_dev), __func__));
14440 
14441 	while (wm_get_swsm_semaphore(sc) != 0)
14442 		continue;
14443 
14444 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
14445 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
14446 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
14447 
14448 	wm_put_swsm_semaphore(sc);
14449 }
14450 
14451 static int
14452 wm_get_nvm_80003(struct wm_softc *sc)
14453 {
14454 	int rv;
14455 
14456 	DPRINTF(sc, WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
14457 		device_xname(sc->sc_dev), __func__));
14458 
14459 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
14460 		aprint_error_dev(sc->sc_dev,
14461 		    "%s: failed to get semaphore(SWFW)\n", __func__);
14462 		return rv;
14463 	}
14464 
14465 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14466 	    && (rv = wm_get_eecd(sc)) != 0) {
14467 		aprint_error_dev(sc->sc_dev,
14468 		    "%s: failed to get semaphore(EECD)\n", __func__);
14469 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14470 		return rv;
14471 	}
14472 
14473 	return 0;
14474 }
14475 
14476 static void
14477 wm_put_nvm_80003(struct wm_softc *sc)
14478 {
14479 
14480 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14481 		device_xname(sc->sc_dev), __func__));
14482 
14483 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14484 		wm_put_eecd(sc);
14485 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
14486 }
14487 
14488 static int
14489 wm_get_nvm_82571(struct wm_softc *sc)
14490 {
14491 	int rv;
14492 
14493 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14494 		device_xname(sc->sc_dev), __func__));
14495 
14496 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
14497 		return rv;
14498 
14499 	switch (sc->sc_type) {
14500 	case WM_T_82573:
14501 		break;
14502 	default:
14503 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14504 			rv = wm_get_eecd(sc);
14505 		break;
14506 	}
14507 
14508 	if (rv != 0) {
14509 		aprint_error_dev(sc->sc_dev,
14510 		    "%s: failed to get semaphore\n",
14511 		    __func__);
14512 		wm_put_swsm_semaphore(sc);
14513 	}
14514 
14515 	return rv;
14516 }
14517 
14518 static void
14519 wm_put_nvm_82571(struct wm_softc *sc)
14520 {
14521 
14522 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14523 		device_xname(sc->sc_dev), __func__));
14524 
14525 	switch (sc->sc_type) {
14526 	case WM_T_82573:
14527 		break;
14528 	default:
14529 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
14530 			wm_put_eecd(sc);
14531 		break;
14532 	}
14533 
14534 	wm_put_swsm_semaphore(sc);
14535 }
14536 
14537 static int
14538 wm_get_phy_82575(struct wm_softc *sc)
14539 {
14540 
14541 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14542 		device_xname(sc->sc_dev), __func__));
14543 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14544 }
14545 
14546 static void
14547 wm_put_phy_82575(struct wm_softc *sc)
14548 {
14549 
14550 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14551 		device_xname(sc->sc_dev), __func__));
14552 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
14553 }
14554 
14555 static int
14556 wm_get_swfwhw_semaphore(struct wm_softc *sc)
14557 {
14558 	uint32_t ext_ctrl;
14559 	int timeout = 200;
14560 
14561 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14562 		device_xname(sc->sc_dev), __func__));
14563 
14564 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14565 	for (timeout = 0; timeout < 200; timeout++) {
14566 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14567 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14568 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14569 
14570 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14571 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14572 			return 0;
14573 		delay(5000);
14574 	}
14575 	device_printf(sc->sc_dev,
14576 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
14577 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14578 	return 1;
14579 }
14580 
14581 static void
14582 wm_put_swfwhw_semaphore(struct wm_softc *sc)
14583 {
14584 	uint32_t ext_ctrl;
14585 
14586 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14587 		device_xname(sc->sc_dev), __func__));
14588 
14589 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14590 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14591 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14592 
14593 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
14594 }
14595 
14596 static int
14597 wm_get_swflag_ich8lan(struct wm_softc *sc)
14598 {
14599 	uint32_t ext_ctrl;
14600 	int timeout;
14601 
14602 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14603 		device_xname(sc->sc_dev), __func__));
14604 	mutex_enter(sc->sc_ich_phymtx);
14605 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
14606 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14607 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
14608 			break;
14609 		delay(1000);
14610 	}
14611 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
14612 		device_printf(sc->sc_dev,
14613 		    "SW has already locked the resource\n");
14614 		goto out;
14615 	}
14616 
14617 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
14618 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14619 	for (timeout = 0; timeout < 1000; timeout++) {
14620 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14621 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
14622 			break;
14623 		delay(1000);
14624 	}
14625 	if (timeout >= 1000) {
14626 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
14627 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14628 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14629 		goto out;
14630 	}
14631 	return 0;
14632 
14633 out:
14634 	mutex_exit(sc->sc_ich_phymtx);
14635 	return 1;
14636 }
14637 
14638 static void
14639 wm_put_swflag_ich8lan(struct wm_softc *sc)
14640 {
14641 	uint32_t ext_ctrl;
14642 
14643 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14644 		device_xname(sc->sc_dev), __func__));
14645 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
14646 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
14647 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14648 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
14649 	} else {
14650 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
14651 	}
14652 
14653 	mutex_exit(sc->sc_ich_phymtx);
14654 }
14655 
14656 static int
14657 wm_get_nvm_ich8lan(struct wm_softc *sc)
14658 {
14659 
14660 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14661 		device_xname(sc->sc_dev), __func__));
14662 	mutex_enter(sc->sc_ich_nvmmtx);
14663 
14664 	return 0;
14665 }
14666 
14667 static void
14668 wm_put_nvm_ich8lan(struct wm_softc *sc)
14669 {
14670 
14671 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14672 		device_xname(sc->sc_dev), __func__));
14673 	mutex_exit(sc->sc_ich_nvmmtx);
14674 }
14675 
14676 static int
14677 wm_get_hw_semaphore_82573(struct wm_softc *sc)
14678 {
14679 	int i = 0;
14680 	uint32_t reg;
14681 
14682 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14683 		device_xname(sc->sc_dev), __func__));
14684 
14685 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14686 	do {
14687 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
14688 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
14689 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14690 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
14691 			break;
14692 		delay(2*1000);
14693 		i++;
14694 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
14695 
14696 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
14697 		wm_put_hw_semaphore_82573(sc);
14698 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
14699 		    device_xname(sc->sc_dev));
14700 		return -1;
14701 	}
14702 
14703 	return 0;
14704 }
14705 
14706 static void
14707 wm_put_hw_semaphore_82573(struct wm_softc *sc)
14708 {
14709 	uint32_t reg;
14710 
14711 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14712 		device_xname(sc->sc_dev), __func__));
14713 
14714 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14715 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
14716 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14717 }
14718 
14719 /*
14720  * Management mode and power management related subroutines.
14721  * BMC, AMT, suspend/resume and EEE.
14722  */
14723 
14724 #ifdef WM_WOL
14725 static int
14726 wm_check_mng_mode(struct wm_softc *sc)
14727 {
14728 	int rv;
14729 
14730 	switch (sc->sc_type) {
14731 	case WM_T_ICH8:
14732 	case WM_T_ICH9:
14733 	case WM_T_ICH10:
14734 	case WM_T_PCH:
14735 	case WM_T_PCH2:
14736 	case WM_T_PCH_LPT:
14737 	case WM_T_PCH_SPT:
14738 	case WM_T_PCH_CNP:
14739 		rv = wm_check_mng_mode_ich8lan(sc);
14740 		break;
14741 	case WM_T_82574:
14742 	case WM_T_82583:
14743 		rv = wm_check_mng_mode_82574(sc);
14744 		break;
14745 	case WM_T_82571:
14746 	case WM_T_82572:
14747 	case WM_T_82573:
14748 	case WM_T_80003:
14749 		rv = wm_check_mng_mode_generic(sc);
14750 		break;
14751 	default:
14752 		/* Noting to do */
14753 		rv = 0;
14754 		break;
14755 	}
14756 
14757 	return rv;
14758 }
14759 
14760 static int
14761 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
14762 {
14763 	uint32_t fwsm;
14764 
14765 	fwsm = CSR_READ(sc, WMREG_FWSM);
14766 
14767 	if (((fwsm & FWSM_FW_VALID) != 0)
14768 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14769 		return 1;
14770 
14771 	return 0;
14772 }
14773 
14774 static int
14775 wm_check_mng_mode_82574(struct wm_softc *sc)
14776 {
14777 	uint16_t data;
14778 
14779 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14780 
14781 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
14782 		return 1;
14783 
14784 	return 0;
14785 }
14786 
14787 static int
14788 wm_check_mng_mode_generic(struct wm_softc *sc)
14789 {
14790 	uint32_t fwsm;
14791 
14792 	fwsm = CSR_READ(sc, WMREG_FWSM);
14793 
14794 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
14795 		return 1;
14796 
14797 	return 0;
14798 }
14799 #endif /* WM_WOL */
14800 
14801 static int
14802 wm_enable_mng_pass_thru(struct wm_softc *sc)
14803 {
14804 	uint32_t manc, fwsm, factps;
14805 
14806 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
14807 		return 0;
14808 
14809 	manc = CSR_READ(sc, WMREG_MANC);
14810 
14811 	DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
14812 		device_xname(sc->sc_dev), manc));
14813 	if ((manc & MANC_RECV_TCO_EN) == 0)
14814 		return 0;
14815 
14816 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
14817 		fwsm = CSR_READ(sc, WMREG_FWSM);
14818 		factps = CSR_READ(sc, WMREG_FACTPS);
14819 		if (((factps & FACTPS_MNGCG) == 0)
14820 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
14821 			return 1;
14822 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
14823 		uint16_t data;
14824 
14825 		factps = CSR_READ(sc, WMREG_FACTPS);
14826 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
14827 		DPRINTF(sc, WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
14828 			device_xname(sc->sc_dev), factps, data));
14829 		if (((factps & FACTPS_MNGCG) == 0)
14830 		    && ((data & NVM_CFG2_MNGM_MASK)
14831 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
14832 			return 1;
14833 	} else if (((manc & MANC_SMBUS_EN) != 0)
14834 	    && ((manc & MANC_ASF_EN) == 0))
14835 		return 1;
14836 
14837 	return 0;
14838 }
14839 
14840 static bool
14841 wm_phy_resetisblocked(struct wm_softc *sc)
14842 {
14843 	bool blocked = false;
14844 	uint32_t reg;
14845 	int i = 0;
14846 
14847 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
14848 		device_xname(sc->sc_dev), __func__));
14849 
14850 	switch (sc->sc_type) {
14851 	case WM_T_ICH8:
14852 	case WM_T_ICH9:
14853 	case WM_T_ICH10:
14854 	case WM_T_PCH:
14855 	case WM_T_PCH2:
14856 	case WM_T_PCH_LPT:
14857 	case WM_T_PCH_SPT:
14858 	case WM_T_PCH_CNP:
14859 		do {
14860 			reg = CSR_READ(sc, WMREG_FWSM);
14861 			if ((reg & FWSM_RSPCIPHY) == 0) {
14862 				blocked = true;
14863 				delay(10*1000);
14864 				continue;
14865 			}
14866 			blocked = false;
14867 		} while (blocked && (i++ < 30));
14868 		return blocked;
14869 		break;
14870 	case WM_T_82571:
14871 	case WM_T_82572:
14872 	case WM_T_82573:
14873 	case WM_T_82574:
14874 	case WM_T_82583:
14875 	case WM_T_80003:
14876 		reg = CSR_READ(sc, WMREG_MANC);
14877 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
14878 			return true;
14879 		else
14880 			return false;
14881 		break;
14882 	default:
14883 		/* No problem */
14884 		break;
14885 	}
14886 
14887 	return false;
14888 }
14889 
14890 static void
14891 wm_get_hw_control(struct wm_softc *sc)
14892 {
14893 	uint32_t reg;
14894 
14895 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14896 		device_xname(sc->sc_dev), __func__));
14897 
14898 	if (sc->sc_type == WM_T_82573) {
14899 		reg = CSR_READ(sc, WMREG_SWSM);
14900 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
14901 	} else if (sc->sc_type >= WM_T_82571) {
14902 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14903 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
14904 	}
14905 }
14906 
14907 static void
14908 wm_release_hw_control(struct wm_softc *sc)
14909 {
14910 	uint32_t reg;
14911 
14912 	DPRINTF(sc, WM_DEBUG_LOCK, ("%s: %s called\n",
14913 		device_xname(sc->sc_dev), __func__));
14914 
14915 	if (sc->sc_type == WM_T_82573) {
14916 		reg = CSR_READ(sc, WMREG_SWSM);
14917 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
14918 	} else if (sc->sc_type >= WM_T_82571) {
14919 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14920 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
14921 	}
14922 }
14923 
14924 static void
14925 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
14926 {
14927 	uint32_t reg;
14928 
14929 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
14930 		device_xname(sc->sc_dev), __func__));
14931 
14932 	if (sc->sc_type < WM_T_PCH2)
14933 		return;
14934 
14935 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
14936 
14937 	if (gate)
14938 		reg |= EXTCNFCTR_GATE_PHY_CFG;
14939 	else
14940 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
14941 
14942 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
14943 }
14944 
14945 static int
14946 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
14947 {
14948 	uint32_t fwsm, reg;
14949 	int rv = 0;
14950 
14951 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
14952 		device_xname(sc->sc_dev), __func__));
14953 
14954 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
14955 	wm_gate_hw_phy_config_ich8lan(sc, true);
14956 
14957 	/* Disable ULP */
14958 	wm_ulp_disable(sc);
14959 
14960 	/* Acquire PHY semaphore */
14961 	rv = sc->phy.acquire(sc);
14962 	if (rv != 0) {
14963 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
14964 		device_xname(sc->sc_dev), __func__));
14965 		return -1;
14966 	}
14967 
14968 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
14969 	 * inaccessible and resetting the PHY is not blocked, toggle the
14970 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
14971 	 */
14972 	fwsm = CSR_READ(sc, WMREG_FWSM);
14973 	switch (sc->sc_type) {
14974 	case WM_T_PCH_LPT:
14975 	case WM_T_PCH_SPT:
14976 	case WM_T_PCH_CNP:
14977 		if (wm_phy_is_accessible_pchlan(sc))
14978 			break;
14979 
14980 		/* Before toggling LANPHYPC, see if PHY is accessible by
14981 		 * forcing MAC to SMBus mode first.
14982 		 */
14983 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14984 		reg |= CTRL_EXT_FORCE_SMBUS;
14985 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14986 #if 0
14987 		/* XXX Isn't this required??? */
14988 		CSR_WRITE_FLUSH(sc);
14989 #endif
14990 		/* Wait 50 milliseconds for MAC to finish any retries
14991 		 * that it might be trying to perform from previous
14992 		 * attempts to acknowledge any phy read requests.
14993 		 */
14994 		delay(50 * 1000);
14995 		/* FALLTHROUGH */
14996 	case WM_T_PCH2:
14997 		if (wm_phy_is_accessible_pchlan(sc) == true)
14998 			break;
14999 		/* FALLTHROUGH */
15000 	case WM_T_PCH:
15001 		if (sc->sc_type == WM_T_PCH)
15002 			if ((fwsm & FWSM_FW_VALID) != 0)
15003 				break;
15004 
15005 		if (wm_phy_resetisblocked(sc) == true) {
15006 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
15007 			break;
15008 		}
15009 
15010 		/* Toggle LANPHYPC Value bit */
15011 		wm_toggle_lanphypc_pch_lpt(sc);
15012 
15013 		if (sc->sc_type >= WM_T_PCH_LPT) {
15014 			if (wm_phy_is_accessible_pchlan(sc) == true)
15015 				break;
15016 
15017 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
15018 			 * so ensure that the MAC is also out of SMBus mode
15019 			 */
15020 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
15021 			reg &= ~CTRL_EXT_FORCE_SMBUS;
15022 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15023 
15024 			if (wm_phy_is_accessible_pchlan(sc) == true)
15025 				break;
15026 			rv = -1;
15027 		}
15028 		break;
15029 	default:
15030 		break;
15031 	}
15032 
15033 	/* Release semaphore */
15034 	sc->phy.release(sc);
15035 
15036 	if (rv == 0) {
15037 		/* Check to see if able to reset PHY.  Print error if not */
15038 		if (wm_phy_resetisblocked(sc)) {
15039 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15040 			goto out;
15041 		}
15042 
15043 		/* Reset the PHY before any access to it.  Doing so, ensures
15044 		 * that the PHY is in a known good state before we read/write
15045 		 * PHY registers.  The generic reset is sufficient here,
15046 		 * because we haven't determined the PHY type yet.
15047 		 */
15048 		if (wm_reset_phy(sc) != 0)
15049 			goto out;
15050 
15051 		/* On a successful reset, possibly need to wait for the PHY
15052 		 * to quiesce to an accessible state before returning control
15053 		 * to the calling function.  If the PHY does not quiesce, then
15054 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
15055 		 *  the PHY is in.
15056 		 */
15057 		if (wm_phy_resetisblocked(sc))
15058 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
15059 	}
15060 
15061 out:
15062 	/* Ungate automatic PHY configuration on non-managed 82579 */
15063 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
15064 		delay(10*1000);
15065 		wm_gate_hw_phy_config_ich8lan(sc, false);
15066 	}
15067 
15068 	return 0;
15069 }
15070 
15071 static void
15072 wm_init_manageability(struct wm_softc *sc)
15073 {
15074 
15075 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15076 		device_xname(sc->sc_dev), __func__));
15077 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
15078 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
15079 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
15080 
15081 		/* Disable hardware interception of ARP */
15082 		manc &= ~MANC_ARP_EN;
15083 
15084 		/* Enable receiving management packets to the host */
15085 		if (sc->sc_type >= WM_T_82571) {
15086 			manc |= MANC_EN_MNG2HOST;
15087 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
15088 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
15089 		}
15090 
15091 		CSR_WRITE(sc, WMREG_MANC, manc);
15092 	}
15093 }
15094 
15095 static void
15096 wm_release_manageability(struct wm_softc *sc)
15097 {
15098 
15099 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
15100 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
15101 
15102 		manc |= MANC_ARP_EN;
15103 		if (sc->sc_type >= WM_T_82571)
15104 			manc &= ~MANC_EN_MNG2HOST;
15105 
15106 		CSR_WRITE(sc, WMREG_MANC, manc);
15107 	}
15108 }
15109 
15110 static void
15111 wm_get_wakeup(struct wm_softc *sc)
15112 {
15113 
15114 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
15115 	switch (sc->sc_type) {
15116 	case WM_T_82573:
15117 	case WM_T_82583:
15118 		sc->sc_flags |= WM_F_HAS_AMT;
15119 		/* FALLTHROUGH */
15120 	case WM_T_80003:
15121 	case WM_T_82575:
15122 	case WM_T_82576:
15123 	case WM_T_82580:
15124 	case WM_T_I350:
15125 	case WM_T_I354:
15126 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
15127 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
15128 		/* FALLTHROUGH */
15129 	case WM_T_82541:
15130 	case WM_T_82541_2:
15131 	case WM_T_82547:
15132 	case WM_T_82547_2:
15133 	case WM_T_82571:
15134 	case WM_T_82572:
15135 	case WM_T_82574:
15136 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15137 		break;
15138 	case WM_T_ICH8:
15139 	case WM_T_ICH9:
15140 	case WM_T_ICH10:
15141 	case WM_T_PCH:
15142 	case WM_T_PCH2:
15143 	case WM_T_PCH_LPT:
15144 	case WM_T_PCH_SPT:
15145 	case WM_T_PCH_CNP:
15146 		sc->sc_flags |= WM_F_HAS_AMT;
15147 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
15148 		break;
15149 	default:
15150 		break;
15151 	}
15152 
15153 	/* 1: HAS_MANAGE */
15154 	if (wm_enable_mng_pass_thru(sc) != 0)
15155 		sc->sc_flags |= WM_F_HAS_MANAGE;
15156 
15157 	/*
15158 	 * Note that the WOL flags is set after the resetting of the eeprom
15159 	 * stuff
15160 	 */
15161 }
15162 
15163 /*
15164  * Unconfigure Ultra Low Power mode.
15165  * Only for I217 and newer (see below).
15166  */
15167 static int
15168 wm_ulp_disable(struct wm_softc *sc)
15169 {
15170 	uint32_t reg;
15171 	uint16_t phyreg;
15172 	int i = 0, rv = 0;
15173 
15174 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15175 		device_xname(sc->sc_dev), __func__));
15176 	/* Exclude old devices */
15177 	if ((sc->sc_type < WM_T_PCH_LPT)
15178 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
15179 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
15180 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
15181 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
15182 		return 0;
15183 
15184 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
15185 		/* Request ME un-configure ULP mode in the PHY */
15186 		reg = CSR_READ(sc, WMREG_H2ME);
15187 		reg &= ~H2ME_ULP;
15188 		reg |= H2ME_ENFORCE_SETTINGS;
15189 		CSR_WRITE(sc, WMREG_H2ME, reg);
15190 
15191 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
15192 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
15193 			if (i++ == 30) {
15194 				device_printf(sc->sc_dev, "%s timed out\n",
15195 				    __func__);
15196 				return -1;
15197 			}
15198 			delay(10 * 1000);
15199 		}
15200 		reg = CSR_READ(sc, WMREG_H2ME);
15201 		reg &= ~H2ME_ENFORCE_SETTINGS;
15202 		CSR_WRITE(sc, WMREG_H2ME, reg);
15203 
15204 		return 0;
15205 	}
15206 
15207 	/* Acquire semaphore */
15208 	rv = sc->phy.acquire(sc);
15209 	if (rv != 0) {
15210 		DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s: failed\n",
15211 		device_xname(sc->sc_dev), __func__));
15212 		return -1;
15213 	}
15214 
15215 	/* Toggle LANPHYPC */
15216 	wm_toggle_lanphypc_pch_lpt(sc);
15217 
15218 	/* Unforce SMBus mode in PHY */
15219 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
15220 	if (rv != 0) {
15221 		uint32_t reg2;
15222 
15223 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
15224 			__func__);
15225 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
15226 		reg2 |= CTRL_EXT_FORCE_SMBUS;
15227 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
15228 		delay(50 * 1000);
15229 
15230 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
15231 		    &phyreg);
15232 		if (rv != 0)
15233 			goto release;
15234 	}
15235 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
15236 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
15237 
15238 	/* Unforce SMBus mode in MAC */
15239 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
15240 	reg &= ~CTRL_EXT_FORCE_SMBUS;
15241 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15242 
15243 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
15244 	if (rv != 0)
15245 		goto release;
15246 	phyreg |= HV_PM_CTRL_K1_ENA;
15247 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
15248 
15249 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
15250 		&phyreg);
15251 	if (rv != 0)
15252 		goto release;
15253 	phyreg &= ~(I218_ULP_CONFIG1_IND
15254 	    | I218_ULP_CONFIG1_STICKY_ULP
15255 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
15256 	    | I218_ULP_CONFIG1_WOL_HOST
15257 	    | I218_ULP_CONFIG1_INBAND_EXIT
15258 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
15259 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
15260 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
15261 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15262 	phyreg |= I218_ULP_CONFIG1_START;
15263 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
15264 
15265 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
15266 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
15267 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
15268 
15269 release:
15270 	/* Release semaphore */
15271 	sc->phy.release(sc);
15272 	wm_gmii_reset(sc);
15273 	delay(50 * 1000);
15274 
15275 	return rv;
15276 }
15277 
15278 /* WOL in the newer chipset interfaces (pchlan) */
15279 static int
15280 wm_enable_phy_wakeup(struct wm_softc *sc)
15281 {
15282 	device_t dev = sc->sc_dev;
15283 	uint32_t mreg, moff;
15284 	uint16_t wuce, wuc, wufc, preg;
15285 	int i, rv;
15286 
15287 	KASSERT(sc->sc_type >= WM_T_PCH);
15288 
15289 	/* Copy MAC RARs to PHY RARs */
15290 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
15291 
15292 	/* Activate PHY wakeup */
15293 	rv = sc->phy.acquire(sc);
15294 	if (rv != 0) {
15295 		device_printf(dev, "%s: failed to acquire semaphore\n",
15296 		    __func__);
15297 		return rv;
15298 	}
15299 
15300 	/*
15301 	 * Enable access to PHY wakeup registers.
15302 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
15303 	 */
15304 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
15305 	if (rv != 0) {
15306 		device_printf(dev,
15307 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
15308 		goto release;
15309 	}
15310 
15311 	/* Copy MAC MTA to PHY MTA */
15312 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
15313 		uint16_t lo, hi;
15314 
15315 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
15316 		lo = (uint16_t)(mreg & 0xffff);
15317 		hi = (uint16_t)((mreg >> 16) & 0xffff);
15318 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
15319 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
15320 	}
15321 
15322 	/* Configure PHY Rx Control register */
15323 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
15324 	mreg = CSR_READ(sc, WMREG_RCTL);
15325 	if (mreg & RCTL_UPE)
15326 		preg |= BM_RCTL_UPE;
15327 	if (mreg & RCTL_MPE)
15328 		preg |= BM_RCTL_MPE;
15329 	preg &= ~(BM_RCTL_MO_MASK);
15330 	moff = __SHIFTOUT(mreg, RCTL_MO);
15331 	if (moff != 0)
15332 		preg |= moff << BM_RCTL_MO_SHIFT;
15333 	if (mreg & RCTL_BAM)
15334 		preg |= BM_RCTL_BAM;
15335 	if (mreg & RCTL_PMCF)
15336 		preg |= BM_RCTL_PMCF;
15337 	mreg = CSR_READ(sc, WMREG_CTRL);
15338 	if (mreg & CTRL_RFCE)
15339 		preg |= BM_RCTL_RFCE;
15340 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
15341 
15342 	wuc = WUC_APME | WUC_PME_EN;
15343 	wufc = WUFC_MAG;
15344 	/* Enable PHY wakeup in MAC register */
15345 	CSR_WRITE(sc, WMREG_WUC,
15346 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
15347 	CSR_WRITE(sc, WMREG_WUFC, wufc);
15348 
15349 	/* Configure and enable PHY wakeup in PHY registers */
15350 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
15351 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
15352 
15353 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
15354 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
15355 
15356 release:
15357 	sc->phy.release(sc);
15358 
15359 	return 0;
15360 }
15361 
15362 /* Power down workaround on D3 */
15363 static void
15364 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
15365 {
15366 	uint32_t reg;
15367 	uint16_t phyreg;
15368 	int i;
15369 
15370 	for (i = 0; i < 2; i++) {
15371 		/* Disable link */
15372 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15373 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15374 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15375 
15376 		/*
15377 		 * Call gig speed drop workaround on Gig disable before
15378 		 * accessing any PHY registers
15379 		 */
15380 		if (sc->sc_type == WM_T_ICH8)
15381 			wm_gig_downshift_workaround_ich8lan(sc);
15382 
15383 		/* Write VR power-down enable */
15384 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15385 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15386 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
15387 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
15388 
15389 		/* Read it back and test */
15390 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
15391 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
15392 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
15393 			break;
15394 
15395 		/* Issue PHY reset and repeat at most one more time */
15396 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
15397 	}
15398 }
15399 
15400 /*
15401  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
15402  *  @sc: pointer to the HW structure
15403  *
15404  *  During S0 to Sx transition, it is possible the link remains at gig
15405  *  instead of negotiating to a lower speed.  Before going to Sx, set
15406  *  'Gig Disable' to force link speed negotiation to a lower speed based on
15407  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
15408  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
15409  *  needs to be written.
15410  *  Parts that support (and are linked to a partner which support) EEE in
15411  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
15412  *  than 10Mbps w/o EEE.
15413  */
15414 static void
15415 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
15416 {
15417 	device_t dev = sc->sc_dev;
15418 	struct ethercom *ec = &sc->sc_ethercom;
15419 	uint32_t phy_ctrl;
15420 	int rv;
15421 
15422 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
15423 	phy_ctrl |= PHY_CTRL_GBE_DIS;
15424 
15425 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
15426 
15427 	if (sc->sc_phytype == WMPHY_I217) {
15428 		uint16_t devid = sc->sc_pcidevid;
15429 
15430 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
15431 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
15432 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
15433 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
15434 		    (sc->sc_type >= WM_T_PCH_SPT))
15435 			CSR_WRITE(sc, WMREG_FEXTNVM6,
15436 			    CSR_READ(sc, WMREG_FEXTNVM6)
15437 			    & ~FEXTNVM6_REQ_PLL_CLK);
15438 
15439 		if (sc->phy.acquire(sc) != 0)
15440 			goto out;
15441 
15442 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15443 			uint16_t eee_advert;
15444 
15445 			rv = wm_read_emi_reg_locked(dev,
15446 			    I217_EEE_ADVERTISEMENT, &eee_advert);
15447 			if (rv)
15448 				goto release;
15449 
15450 			/*
15451 			 * Disable LPLU if both link partners support 100BaseT
15452 			 * EEE and 100Full is advertised on both ends of the
15453 			 * link, and enable Auto Enable LPI since there will
15454 			 * be no driver to enable LPI while in Sx.
15455 			 */
15456 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
15457 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
15458 				uint16_t anar, phy_reg;
15459 
15460 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
15461 				    &anar);
15462 				if (anar & ANAR_TX_FD) {
15463 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
15464 					    PHY_CTRL_NOND0A_LPLU);
15465 
15466 					/* Set Auto Enable LPI after link up */
15467 					sc->phy.readreg_locked(dev, 2,
15468 					    I217_LPI_GPIO_CTRL, &phy_reg);
15469 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15470 					sc->phy.writereg_locked(dev, 2,
15471 					    I217_LPI_GPIO_CTRL, phy_reg);
15472 				}
15473 			}
15474 		}
15475 
15476 		/*
15477 		 * For i217 Intel Rapid Start Technology support,
15478 		 * when the system is going into Sx and no manageability engine
15479 		 * is present, the driver must configure proxy to reset only on
15480 		 * power good.	LPI (Low Power Idle) state must also reset only
15481 		 * on power good, as well as the MTA (Multicast table array).
15482 		 * The SMBus release must also be disabled on LCD reset.
15483 		 */
15484 
15485 		/*
15486 		 * Enable MTA to reset for Intel Rapid Start Technology
15487 		 * Support
15488 		 */
15489 
15490 release:
15491 		sc->phy.release(sc);
15492 	}
15493 out:
15494 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
15495 
15496 	if (sc->sc_type == WM_T_ICH8)
15497 		wm_gig_downshift_workaround_ich8lan(sc);
15498 
15499 	if (sc->sc_type >= WM_T_PCH) {
15500 		wm_oem_bits_config_ich8lan(sc, false);
15501 
15502 		/* Reset PHY to activate OEM bits on 82577/8 */
15503 		if (sc->sc_type == WM_T_PCH)
15504 			wm_reset_phy(sc);
15505 
15506 		if (sc->phy.acquire(sc) != 0)
15507 			return;
15508 		wm_write_smbus_addr(sc);
15509 		sc->phy.release(sc);
15510 	}
15511 }
15512 
15513 /*
15514  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
15515  *  @sc: pointer to the HW structure
15516  *
15517  *  During Sx to S0 transitions on non-managed devices or managed devices
15518  *  on which PHY resets are not blocked, if the PHY registers cannot be
15519  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
15520  *  the PHY.
15521  *  On i217, setup Intel Rapid Start Technology.
15522  */
15523 static int
15524 wm_resume_workarounds_pchlan(struct wm_softc *sc)
15525 {
15526 	device_t dev = sc->sc_dev;
15527 	int rv;
15528 
15529 	if (sc->sc_type < WM_T_PCH2)
15530 		return 0;
15531 
15532 	rv = wm_init_phy_workarounds_pchlan(sc);
15533 	if (rv != 0)
15534 		return -1;
15535 
15536 	/* For i217 Intel Rapid Start Technology support when the system
15537 	 * is transitioning from Sx and no manageability engine is present
15538 	 * configure SMBus to restore on reset, disable proxy, and enable
15539 	 * the reset on MTA (Multicast table array).
15540 	 */
15541 	if (sc->sc_phytype == WMPHY_I217) {
15542 		uint16_t phy_reg;
15543 
15544 		if (sc->phy.acquire(sc) != 0)
15545 			return -1;
15546 
15547 		/* Clear Auto Enable LPI after link up */
15548 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
15549 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
15550 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
15551 
15552 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
15553 			/* Restore clear on SMB if no manageability engine
15554 			 * is present
15555 			 */
15556 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
15557 			    &phy_reg);
15558 			if (rv != 0)
15559 				goto release;
15560 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
15561 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
15562 
15563 			/* Disable Proxy */
15564 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
15565 		}
15566 		/* Enable reset on MTA */
15567 		sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
15568 		if (rv != 0)
15569 			goto release;
15570 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
15571 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
15572 
15573 release:
15574 		sc->phy.release(sc);
15575 		return rv;
15576 	}
15577 
15578 	return 0;
15579 }
15580 
15581 static void
15582 wm_enable_wakeup(struct wm_softc *sc)
15583 {
15584 	uint32_t reg, pmreg;
15585 	pcireg_t pmode;
15586 	int rv = 0;
15587 
15588 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15589 		device_xname(sc->sc_dev), __func__));
15590 
15591 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
15592 	    &pmreg, NULL) == 0)
15593 		return;
15594 
15595 	if ((sc->sc_flags & WM_F_WOL) == 0)
15596 		goto pme;
15597 
15598 	/* Advertise the wakeup capability */
15599 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
15600 	    | CTRL_SWDPIN(3));
15601 
15602 	/* Keep the laser running on fiber adapters */
15603 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
15604 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
15605 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
15606 		reg |= CTRL_EXT_SWDPIN(3);
15607 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
15608 	}
15609 
15610 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
15611 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
15612 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
15613 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
15614 		wm_suspend_workarounds_ich8lan(sc);
15615 
15616 #if 0	/* For the multicast packet */
15617 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
15618 	reg |= WUFC_MC;
15619 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
15620 #endif
15621 
15622 	if (sc->sc_type >= WM_T_PCH) {
15623 		rv = wm_enable_phy_wakeup(sc);
15624 		if (rv != 0)
15625 			goto pme;
15626 	} else {
15627 		/* Enable wakeup by the MAC */
15628 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
15629 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
15630 	}
15631 
15632 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
15633 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
15634 		|| (sc->sc_type == WM_T_PCH2))
15635 	    && (sc->sc_phytype == WMPHY_IGP_3))
15636 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
15637 
15638 pme:
15639 	/* Request PME */
15640 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
15641 	pmode |= PCI_PMCSR_PME_STS; /* in case it's already set (W1C) */
15642 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
15643 		/* For WOL */
15644 		pmode |= PCI_PMCSR_PME_EN;
15645 	} else {
15646 		/* Disable WOL */
15647 		pmode &= ~PCI_PMCSR_PME_EN;
15648 	}
15649 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
15650 }
15651 
15652 /* Disable ASPM L0s and/or L1 for workaround */
15653 static void
15654 wm_disable_aspm(struct wm_softc *sc)
15655 {
15656 	pcireg_t reg, mask = 0;
15657 	unsigned const char *str = "";
15658 
15659 	/*
15660 	 *  Only for PCIe device which has PCIe capability in the PCI config
15661 	 * space.
15662 	 */
15663 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
15664 		return;
15665 
15666 	switch (sc->sc_type) {
15667 	case WM_T_82571:
15668 	case WM_T_82572:
15669 		/*
15670 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
15671 		 * State Power management L1 State (ASPM L1).
15672 		 */
15673 		mask = PCIE_LCSR_ASPM_L1;
15674 		str = "L1 is";
15675 		break;
15676 	case WM_T_82573:
15677 	case WM_T_82574:
15678 	case WM_T_82583:
15679 		/*
15680 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
15681 		 *
15682 		 * The 82574 and 82583 does not support PCIe ASPM L0s with
15683 		 * some chipset.  The document of 82574 and 82583 says that
15684 		 * disabling L0s with some specific chipset is sufficient,
15685 		 * but we follow as of the Intel em driver does.
15686 		 *
15687 		 * References:
15688 		 * Errata 8 of the Specification Update of i82573.
15689 		 * Errata 20 of the Specification Update of i82574.
15690 		 * Errata 9 of the Specification Update of i82583.
15691 		 */
15692 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
15693 		str = "L0s and L1 are";
15694 		break;
15695 	default:
15696 		return;
15697 	}
15698 
15699 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
15700 	    sc->sc_pcixe_capoff + PCIE_LCSR);
15701 	reg &= ~mask;
15702 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
15703 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
15704 
15705 	/* Print only in wm_attach() */
15706 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
15707 		aprint_verbose_dev(sc->sc_dev,
15708 		    "ASPM %s disabled to workaround the errata.\n", str);
15709 }
15710 
15711 /* LPLU */
15712 
15713 static void
15714 wm_lplu_d0_disable(struct wm_softc *sc)
15715 {
15716 	struct mii_data *mii = &sc->sc_mii;
15717 	uint32_t reg;
15718 	uint16_t phyval;
15719 
15720 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15721 		device_xname(sc->sc_dev), __func__));
15722 
15723 	if (sc->sc_phytype == WMPHY_IFE)
15724 		return;
15725 
15726 	switch (sc->sc_type) {
15727 	case WM_T_82571:
15728 	case WM_T_82572:
15729 	case WM_T_82573:
15730 	case WM_T_82575:
15731 	case WM_T_82576:
15732 		mii->mii_readreg(sc->sc_dev, 1, IGPHY_POWER_MGMT, &phyval);
15733 		phyval &= ~PMR_D0_LPLU;
15734 		mii->mii_writereg(sc->sc_dev, 1, IGPHY_POWER_MGMT, phyval);
15735 		break;
15736 	case WM_T_82580:
15737 	case WM_T_I350:
15738 	case WM_T_I210:
15739 	case WM_T_I211:
15740 		reg = CSR_READ(sc, WMREG_PHPM);
15741 		reg &= ~PHPM_D0A_LPLU;
15742 		CSR_WRITE(sc, WMREG_PHPM, reg);
15743 		break;
15744 	case WM_T_82574:
15745 	case WM_T_82583:
15746 	case WM_T_ICH8:
15747 	case WM_T_ICH9:
15748 	case WM_T_ICH10:
15749 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
15750 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
15751 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15752 		CSR_WRITE_FLUSH(sc);
15753 		break;
15754 	case WM_T_PCH:
15755 	case WM_T_PCH2:
15756 	case WM_T_PCH_LPT:
15757 	case WM_T_PCH_SPT:
15758 	case WM_T_PCH_CNP:
15759 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
15760 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
15761 		if (wm_phy_resetisblocked(sc) == false)
15762 			phyval |= HV_OEM_BITS_ANEGNOW;
15763 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
15764 		break;
15765 	default:
15766 		break;
15767 	}
15768 }
15769 
15770 /* EEE */
15771 
15772 static int
15773 wm_set_eee_i350(struct wm_softc *sc)
15774 {
15775 	struct ethercom *ec = &sc->sc_ethercom;
15776 	uint32_t ipcnfg, eeer;
15777 	uint32_t ipcnfg_mask
15778 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
15779 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
15780 
15781 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
15782 
15783 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
15784 	eeer = CSR_READ(sc, WMREG_EEER);
15785 
15786 	/* Enable or disable per user setting */
15787 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15788 		ipcnfg |= ipcnfg_mask;
15789 		eeer |= eeer_mask;
15790 	} else {
15791 		ipcnfg &= ~ipcnfg_mask;
15792 		eeer &= ~eeer_mask;
15793 	}
15794 
15795 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
15796 	CSR_WRITE(sc, WMREG_EEER, eeer);
15797 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
15798 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
15799 
15800 	return 0;
15801 }
15802 
15803 static int
15804 wm_set_eee_pchlan(struct wm_softc *sc)
15805 {
15806 	device_t dev = sc->sc_dev;
15807 	struct ethercom *ec = &sc->sc_ethercom;
15808 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
15809 	int rv = 0;
15810 
15811 	switch (sc->sc_phytype) {
15812 	case WMPHY_82579:
15813 		lpa = I82579_EEE_LP_ABILITY;
15814 		pcs_status = I82579_EEE_PCS_STATUS;
15815 		adv_addr = I82579_EEE_ADVERTISEMENT;
15816 		break;
15817 	case WMPHY_I217:
15818 		lpa = I217_EEE_LP_ABILITY;
15819 		pcs_status = I217_EEE_PCS_STATUS;
15820 		adv_addr = I217_EEE_ADVERTISEMENT;
15821 		break;
15822 	default:
15823 		return 0;
15824 	}
15825 
15826 	if (sc->phy.acquire(sc)) {
15827 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
15828 		return 0;
15829 	}
15830 
15831 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
15832 	if (rv != 0)
15833 		goto release;
15834 
15835 	/* Clear bits that enable EEE in various speeds */
15836 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
15837 
15838 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
15839 		/* Save off link partner's EEE ability */
15840 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
15841 		if (rv != 0)
15842 			goto release;
15843 
15844 		/* Read EEE advertisement */
15845 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
15846 			goto release;
15847 
15848 		/*
15849 		 * Enable EEE only for speeds in which the link partner is
15850 		 * EEE capable and for which we advertise EEE.
15851 		 */
15852 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
15853 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
15854 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
15855 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
15856 			if ((data & ANLPAR_TX_FD) != 0)
15857 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
15858 			else {
15859 				/*
15860 				 * EEE is not supported in 100Half, so ignore
15861 				 * partner's EEE in 100 ability if full-duplex
15862 				 * is not advertised.
15863 				 */
15864 				sc->eee_lp_ability
15865 				    &= ~AN_EEEADVERT_100_TX;
15866 			}
15867 		}
15868 	}
15869 
15870 	if (sc->sc_phytype == WMPHY_82579) {
15871 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
15872 		if (rv != 0)
15873 			goto release;
15874 
15875 		data &= ~I82579_LPI_PLL_SHUT_100;
15876 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
15877 	}
15878 
15879 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
15880 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
15881 		goto release;
15882 
15883 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
15884 release:
15885 	sc->phy.release(sc);
15886 
15887 	return rv;
15888 }
15889 
15890 static int
15891 wm_set_eee(struct wm_softc *sc)
15892 {
15893 	struct ethercom *ec = &sc->sc_ethercom;
15894 
15895 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
15896 		return 0;
15897 
15898 	if (sc->sc_type == WM_T_I354) {
15899 		/* I354 uses an external PHY */
15900 		return 0; /* not yet */
15901 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
15902 		return wm_set_eee_i350(sc);
15903 	else if (sc->sc_type >= WM_T_PCH2)
15904 		return wm_set_eee_pchlan(sc);
15905 
15906 	return 0;
15907 }
15908 
15909 /*
15910  * Workarounds (mainly PHY related).
15911  * Basically, PHY's workarounds are in the PHY drivers.
15912  */
15913 
15914 /* Work-around for 82566 Kumeran PCS lock loss */
15915 static int
15916 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
15917 {
15918 	struct mii_data *mii = &sc->sc_mii;
15919 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
15920 	int i, reg, rv;
15921 	uint16_t phyreg;
15922 
15923 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
15924 		device_xname(sc->sc_dev), __func__));
15925 
15926 	/* If the link is not up, do nothing */
15927 	if ((status & STATUS_LU) == 0)
15928 		return 0;
15929 
15930 	/* Nothing to do if the link is other than 1Gbps */
15931 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
15932 		return 0;
15933 
15934 	for (i = 0; i < 10; i++) {
15935 		/* read twice */
15936 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15937 		if (rv != 0)
15938 			return rv;
15939 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
15940 		if (rv != 0)
15941 			return rv;
15942 
15943 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
15944 			goto out;	/* GOOD! */
15945 
15946 		/* Reset the PHY */
15947 		wm_reset_phy(sc);
15948 		delay(5*1000);
15949 	}
15950 
15951 	/* Disable GigE link negotiation */
15952 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
15953 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
15954 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
15955 
15956 	/*
15957 	 * Call gig speed drop workaround on Gig disable before accessing
15958 	 * any PHY registers.
15959 	 */
15960 	wm_gig_downshift_workaround_ich8lan(sc);
15961 
15962 out:
15963 	return 0;
15964 }
15965 
15966 /*
15967  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
15968  *  @sc: pointer to the HW structure
15969  *
15970  *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
15971  *  LPLU, Gig disable, MDIC PHY reset):
15972  *    1) Set Kumeran Near-end loopback
15973  *    2) Clear Kumeran Near-end loopback
15974  *  Should only be called for ICH8[m] devices with any 1G Phy.
15975  */
15976 static void
15977 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
15978 {
15979 	uint16_t kmreg;
15980 
15981 	/* Only for igp3 */
15982 	if (sc->sc_phytype == WMPHY_IGP_3) {
15983 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
15984 			return;
15985 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
15986 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
15987 			return;
15988 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
15989 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
15990 	}
15991 }
15992 
15993 /*
15994  * Workaround for pch's PHYs
15995  * XXX should be moved to new PHY driver?
15996  */
15997 static int
15998 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
15999 {
16000 	device_t dev = sc->sc_dev;
16001 	struct mii_data *mii = &sc->sc_mii;
16002 	struct mii_softc *child;
16003 	uint16_t phy_data, phyrev = 0;
16004 	int phytype = sc->sc_phytype;
16005 	int rv;
16006 
16007 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16008 		device_xname(dev), __func__));
16009 	KASSERT(sc->sc_type == WM_T_PCH);
16010 
16011 	/* Set MDIO slow mode before any other MDIO access */
16012 	if (phytype == WMPHY_82577)
16013 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
16014 			return rv;
16015 
16016 	child = LIST_FIRST(&mii->mii_phys);
16017 	if (child != NULL)
16018 		phyrev = child->mii_mpd_rev;
16019 
16020 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
16021 	if ((child != NULL) &&
16022 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
16023 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
16024 		/* Disable generation of early preamble (0x4431) */
16025 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16026 		    &phy_data);
16027 		if (rv != 0)
16028 			return rv;
16029 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
16030 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
16031 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
16032 		    phy_data);
16033 		if (rv != 0)
16034 			return rv;
16035 
16036 		/* Preamble tuning for SSC */
16037 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
16038 		if (rv != 0)
16039 			return rv;
16040 	}
16041 
16042 	/* 82578 */
16043 	if (phytype == WMPHY_82578) {
16044 		/*
16045 		 * Return registers to default by doing a soft reset then
16046 		 * writing 0x3140 to the control register
16047 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
16048 		 */
16049 		if ((child != NULL) && (phyrev < 2)) {
16050 			PHY_RESET(child);
16051 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
16052 			if (rv != 0)
16053 				return rv;
16054 		}
16055 	}
16056 
16057 	/* Select page 0 */
16058 	if ((rv = sc->phy.acquire(sc)) != 0)
16059 		return rv;
16060 	rv = wm_gmii_mdic_writereg(dev, 1, IGPHY_PAGE_SELECT, 0);
16061 	sc->phy.release(sc);
16062 	if (rv != 0)
16063 		return rv;
16064 
16065 	/*
16066 	 * Configure the K1 Si workaround during phy reset assuming there is
16067 	 * link so that it disables K1 if link is in 1Gbps.
16068 	 */
16069 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
16070 		return rv;
16071 
16072 	/* Workaround for link disconnects on a busy hub in half duplex */
16073 	rv = sc->phy.acquire(sc);
16074 	if (rv)
16075 		return rv;
16076 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
16077 	if (rv)
16078 		goto release;
16079 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
16080 	    phy_data & 0x00ff);
16081 	if (rv)
16082 		goto release;
16083 
16084 	/* Set MSE higher to enable link to stay up when noise is high */
16085 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
16086 release:
16087 	sc->phy.release(sc);
16088 
16089 	return rv;
16090 }
16091 
16092 /*
16093  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
16094  *  @sc:   pointer to the HW structure
16095  */
16096 static void
16097 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
16098 {
16099 
16100 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16101 		device_xname(sc->sc_dev), __func__));
16102 
16103 	if (sc->phy.acquire(sc) != 0)
16104 		return;
16105 
16106 	wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16107 
16108 	sc->phy.release(sc);
16109 }
16110 
16111 static void
16112 wm_copy_rx_addrs_to_phy_ich8lan_locked(struct wm_softc *sc)
16113 {
16114 	device_t dev = sc->sc_dev;
16115 	uint32_t mac_reg;
16116 	uint16_t i, wuce;
16117 	int count;
16118 
16119 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16120 		device_xname(dev), __func__));
16121 
16122 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
16123 		return;
16124 
16125 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
16126 	count = wm_rar_count(sc);
16127 	for (i = 0; i < count; i++) {
16128 		uint16_t lo, hi;
16129 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16130 		lo = (uint16_t)(mac_reg & 0xffff);
16131 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
16132 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
16133 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
16134 
16135 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16136 		lo = (uint16_t)(mac_reg & 0xffff);
16137 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
16138 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
16139 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
16140 	}
16141 
16142 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
16143 }
16144 
16145 /*
16146  *  wm_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
16147  *  with 82579 PHY
16148  *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
16149  */
16150 static int
16151 wm_lv_jumbo_workaround_ich8lan(struct wm_softc *sc, bool enable)
16152 {
16153 	device_t dev = sc->sc_dev;
16154 	int rar_count;
16155 	int rv;
16156 	uint32_t mac_reg;
16157 	uint16_t dft_ctrl, data;
16158 	uint16_t i;
16159 
16160 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16161 		device_xname(dev), __func__));
16162 
16163 	if (sc->sc_type < WM_T_PCH2)
16164 		return 0;
16165 
16166 	/* Acquire PHY semaphore */
16167 	rv = sc->phy.acquire(sc);
16168 	if (rv != 0)
16169 		return rv;
16170 
16171 	/* Disable Rx path while enabling/disabling workaround */
16172 	rv = sc->phy.readreg_locked(dev, 2, I82579_DFT_CTRL, &dft_ctrl);
16173 	if (rv != 0)
16174 		goto out;
16175 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16176 	    dft_ctrl | (1 << 14));
16177 	if (rv != 0)
16178 		goto out;
16179 
16180 	if (enable) {
16181 		/* Write Rx addresses (rar_entry_count for RAL/H, and
16182 		 * SHRAL/H) and initial CRC values to the MAC
16183 		 */
16184 		rar_count = wm_rar_count(sc);
16185 		for (i = 0; i < rar_count; i++) {
16186 			uint8_t mac_addr[ETHER_ADDR_LEN] = {0};
16187 			uint32_t addr_high, addr_low;
16188 
16189 			addr_high = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
16190 			if (!(addr_high & RAL_AV))
16191 				continue;
16192 			addr_low = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
16193 			mac_addr[0] = (addr_low & 0xFF);
16194 			mac_addr[1] = ((addr_low >> 8) & 0xFF);
16195 			mac_addr[2] = ((addr_low >> 16) & 0xFF);
16196 			mac_addr[3] = ((addr_low >> 24) & 0xFF);
16197 			mac_addr[4] = (addr_high & 0xFF);
16198 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
16199 
16200 			CSR_WRITE(sc, WMREG_PCH_RAICC(i),
16201 			    ~ether_crc32_le(mac_addr, ETHER_ADDR_LEN));
16202 		}
16203 
16204 		/* Write Rx addresses to the PHY */
16205 		wm_copy_rx_addrs_to_phy_ich8lan_locked(sc);
16206 	}
16207 
16208 	/*
16209 	 * If enable ==
16210 	 *	true: Enable jumbo frame workaround in the MAC.
16211 	 *	false: Write MAC register values back to h/w defaults.
16212 	 */
16213 	mac_reg = CSR_READ(sc, WMREG_FFLT_DBG);
16214 	if (enable) {
16215 		mac_reg &= ~(1 << 14);
16216 		mac_reg |= (7 << 15);
16217 	} else
16218 		mac_reg &= ~(0xf << 14);
16219 	CSR_WRITE(sc, WMREG_FFLT_DBG, mac_reg);
16220 
16221 	mac_reg = CSR_READ(sc, WMREG_RCTL);
16222 	if (enable) {
16223 		mac_reg |= RCTL_SECRC;
16224 		sc->sc_rctl |= RCTL_SECRC;
16225 		sc->sc_flags |= WM_F_CRC_STRIP;
16226 	} else {
16227 		mac_reg &= ~RCTL_SECRC;
16228 		sc->sc_rctl &= ~RCTL_SECRC;
16229 		sc->sc_flags &= ~WM_F_CRC_STRIP;
16230 	}
16231 	CSR_WRITE(sc, WMREG_RCTL, mac_reg);
16232 
16233 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, &data);
16234 	if (rv != 0)
16235 		goto out;
16236 	if (enable)
16237 		data |= 1 << 0;
16238 	else
16239 		data &= ~(1 << 0);
16240 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_CTRL, data);
16241 	if (rv != 0)
16242 		goto out;
16243 
16244 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &data);
16245 	if (rv != 0)
16246 		goto out;
16247 	/*
16248 	 * XXX FreeBSD and Linux do the same thing that they set the same value
16249 	 * on both the enable case and the disable case. Is it correct?
16250 	 */
16251 	data &= ~(0xf << 8);
16252 	data |= (0xb << 8);
16253 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_HD_CTRL, data);
16254 	if (rv != 0)
16255 		goto out;
16256 
16257 	/*
16258 	 * If enable ==
16259 	 *	true: Enable jumbo frame workaround in the PHY.
16260 	 *	false: Write PHY register values back to h/w defaults.
16261 	 */
16262 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 23), &data);
16263 	if (rv != 0)
16264 		goto out;
16265 	data &= ~(0x7F << 5);
16266 	if (enable)
16267 		data |= (0x37 << 5);
16268 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 23), data);
16269 	if (rv != 0)
16270 		goto out;
16271 
16272 	rv = sc->phy.readreg_locked(dev, 2, BME1000_REG(769, 16), &data);
16273 	if (rv != 0)
16274 		goto out;
16275 	if (enable)
16276 		data &= ~(1 << 13);
16277 	else
16278 		data |= (1 << 13);
16279 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(769, 16), data);
16280 	if (rv != 0)
16281 		goto out;
16282 
16283 	rv = sc->phy.readreg_locked(dev, 2, I82579_UNKNOWN1, &data);
16284 	if (rv != 0)
16285 		goto out;
16286 	data &= ~(0x3FF << 2);
16287 	if (enable)
16288 		data |= (I82579_TX_PTR_GAP << 2);
16289 	else
16290 		data |= (0x8 << 2);
16291 	rv = sc->phy.writereg_locked(dev, 2, I82579_UNKNOWN1, data);
16292 	if (rv != 0)
16293 		goto out;
16294 
16295 	rv = sc->phy.writereg_locked(dev, 2, BME1000_REG(776, 23),
16296 	    enable ? 0xf100 : 0x7e00);
16297 	if (rv != 0)
16298 		goto out;
16299 
16300 	rv = sc->phy.readreg_locked(dev, 2, HV_PM_CTRL, &data);
16301 	if (rv != 0)
16302 		goto out;
16303 	if (enable)
16304 		data |= 1 << 10;
16305 	else
16306 		data &= ~(1 << 10);
16307 	rv = sc->phy.writereg_locked(dev, 2, HV_PM_CTRL, data);
16308 	if (rv != 0)
16309 		goto out;
16310 
16311 	/* Re-enable Rx path after enabling/disabling workaround */
16312 	rv = sc->phy.writereg_locked(dev, 2, I82579_DFT_CTRL,
16313 	    dft_ctrl & ~(1 << 14));
16314 
16315 out:
16316 	sc->phy.release(sc);
16317 
16318 	return rv;
16319 }
16320 
16321 /*
16322  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
16323  *  done after every PHY reset.
16324  */
16325 static int
16326 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
16327 {
16328 	device_t dev = sc->sc_dev;
16329 	int rv;
16330 
16331 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16332 		device_xname(dev), __func__));
16333 	KASSERT(sc->sc_type == WM_T_PCH2);
16334 
16335 	/* Set MDIO slow mode before any other MDIO access */
16336 	rv = wm_set_mdio_slow_mode_hv(sc);
16337 	if (rv != 0)
16338 		return rv;
16339 
16340 	rv = sc->phy.acquire(sc);
16341 	if (rv != 0)
16342 		return rv;
16343 	/* Set MSE higher to enable link to stay up when noise is high */
16344 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
16345 	if (rv != 0)
16346 		goto release;
16347 	/* Drop link after 5 times MSE threshold was reached */
16348 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
16349 release:
16350 	sc->phy.release(sc);
16351 
16352 	return rv;
16353 }
16354 
16355 /**
16356  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
16357  *  @link: link up bool flag
16358  *
16359  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
16360  *  preventing further DMA write requests.  Workaround the issue by disabling
16361  *  the de-assertion of the clock request when in 1Gpbs mode.
16362  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
16363  *  speeds in order to avoid Tx hangs.
16364  **/
16365 static int
16366 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
16367 {
16368 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
16369 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
16370 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
16371 	uint16_t phyreg;
16372 
16373 	if (link && (speed == STATUS_SPEED_1000)) {
16374 		sc->phy.acquire(sc);
16375 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16376 		    &phyreg);
16377 		if (rv != 0)
16378 			goto release;
16379 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16380 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
16381 		if (rv != 0)
16382 			goto release;
16383 		delay(20);
16384 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
16385 
16386 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
16387 		    &phyreg);
16388 release:
16389 		sc->phy.release(sc);
16390 		return rv;
16391 	}
16392 
16393 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
16394 
16395 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
16396 	if (((child != NULL) && (child->mii_mpd_rev > 5))
16397 	    || !link
16398 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
16399 		goto update_fextnvm6;
16400 
16401 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
16402 
16403 	/* Clear link status transmit timeout */
16404 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
16405 	if (speed == STATUS_SPEED_100) {
16406 		/* Set inband Tx timeout to 5x10us for 100Half */
16407 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16408 
16409 		/* Do not extend the K1 entry latency for 100Half */
16410 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16411 	} else {
16412 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
16413 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
16414 
16415 		/* Extend the K1 entry latency for 10 Mbps */
16416 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
16417 	}
16418 
16419 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
16420 
16421 update_fextnvm6:
16422 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
16423 	return 0;
16424 }
16425 
16426 /*
16427  *  wm_k1_gig_workaround_hv - K1 Si workaround
16428  *  @sc:   pointer to the HW structure
16429  *  @link: link up bool flag
16430  *
16431  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
16432  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
16433  *  If link is down, the function will restore the default K1 setting located
16434  *  in the NVM.
16435  */
16436 static int
16437 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
16438 {
16439 	int k1_enable = sc->sc_nvm_k1_enabled;
16440 
16441 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16442 		device_xname(sc->sc_dev), __func__));
16443 
16444 	if (sc->phy.acquire(sc) != 0)
16445 		return -1;
16446 
16447 	if (link) {
16448 		k1_enable = 0;
16449 
16450 		/* Link stall fix for link up */
16451 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16452 		    0x0100);
16453 	} else {
16454 		/* Link stall fix for link down */
16455 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
16456 		    0x4100);
16457 	}
16458 
16459 	wm_configure_k1_ich8lan(sc, k1_enable);
16460 	sc->phy.release(sc);
16461 
16462 	return 0;
16463 }
16464 
16465 /*
16466  *  wm_k1_workaround_lv - K1 Si workaround
16467  *  @sc:   pointer to the HW structure
16468  *
16469  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
16470  *  Disable K1 for 1000 and 100 speeds
16471  */
16472 static int
16473 wm_k1_workaround_lv(struct wm_softc *sc)
16474 {
16475 	uint32_t reg;
16476 	uint16_t phyreg;
16477 	int rv;
16478 
16479 	if (sc->sc_type != WM_T_PCH2)
16480 		return 0;
16481 
16482 	/* Set K1 beacon duration based on 10Mbps speed */
16483 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
16484 	if (rv != 0)
16485 		return rv;
16486 
16487 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
16488 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
16489 		if (phyreg &
16490 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
16491 			/* LV 1G/100 Packet drop issue wa  */
16492 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
16493 			    &phyreg);
16494 			if (rv != 0)
16495 				return rv;
16496 			phyreg &= ~HV_PM_CTRL_K1_ENA;
16497 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
16498 			    phyreg);
16499 			if (rv != 0)
16500 				return rv;
16501 		} else {
16502 			/* For 10Mbps */
16503 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
16504 			reg &= ~FEXTNVM4_BEACON_DURATION;
16505 			reg |= FEXTNVM4_BEACON_DURATION_16US;
16506 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
16507 		}
16508 	}
16509 
16510 	return 0;
16511 }
16512 
16513 /*
16514  *  wm_link_stall_workaround_hv - Si workaround
16515  *  @sc: pointer to the HW structure
16516  *
16517  *  This function works around a Si bug where the link partner can get
16518  *  a link up indication before the PHY does. If small packets are sent
16519  *  by the link partner they can be placed in the packet buffer without
16520  *  being properly accounted for by the PHY and will stall preventing
16521  *  further packets from being received.  The workaround is to clear the
16522  *  packet buffer after the PHY detects link up.
16523  */
16524 static int
16525 wm_link_stall_workaround_hv(struct wm_softc *sc)
16526 {
16527 	uint16_t phyreg;
16528 
16529 	if (sc->sc_phytype != WMPHY_82578)
16530 		return 0;
16531 
16532 	/* Do not apply workaround if in PHY loopback bit 14 set */
16533 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
16534 	if ((phyreg & BMCR_LOOP) != 0)
16535 		return 0;
16536 
16537 	/* Check if link is up and at 1Gbps */
16538 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
16539 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16540 	    | BM_CS_STATUS_SPEED_MASK;
16541 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
16542 		| BM_CS_STATUS_SPEED_1000))
16543 		return 0;
16544 
16545 	delay(200 * 1000);	/* XXX too big */
16546 
16547 	/* Flush the packets in the fifo buffer */
16548 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16549 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
16550 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
16551 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
16552 
16553 	return 0;
16554 }
16555 
16556 static int
16557 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
16558 {
16559 	int rv;
16560 	uint16_t reg;
16561 
16562 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
16563 	if (rv != 0)
16564 		return rv;
16565 
16566 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
16567 	    reg | HV_KMRN_MDIO_SLOW);
16568 }
16569 
16570 /*
16571  *  wm_configure_k1_ich8lan - Configure K1 power state
16572  *  @sc: pointer to the HW structure
16573  *  @enable: K1 state to configure
16574  *
16575  *  Configure the K1 power state based on the provided parameter.
16576  *  Assumes semaphore already acquired.
16577  */
16578 static void
16579 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
16580 {
16581 	uint32_t ctrl, ctrl_ext, tmp;
16582 	uint16_t kmreg;
16583 	int rv;
16584 
16585 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16586 
16587 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
16588 	if (rv != 0)
16589 		return;
16590 
16591 	if (k1_enable)
16592 		kmreg |= KUMCTRLSTA_K1_ENABLE;
16593 	else
16594 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
16595 
16596 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
16597 	if (rv != 0)
16598 		return;
16599 
16600 	delay(20);
16601 
16602 	ctrl = CSR_READ(sc, WMREG_CTRL);
16603 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
16604 
16605 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
16606 	tmp |= CTRL_FRCSPD;
16607 
16608 	CSR_WRITE(sc, WMREG_CTRL, tmp);
16609 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
16610 	CSR_WRITE_FLUSH(sc);
16611 	delay(20);
16612 
16613 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
16614 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
16615 	CSR_WRITE_FLUSH(sc);
16616 	delay(20);
16617 
16618 	return;
16619 }
16620 
16621 /* special case - for 82575 - need to do manual init ... */
16622 static void
16623 wm_reset_init_script_82575(struct wm_softc *sc)
16624 {
16625 	/*
16626 	 * Remark: this is untested code - we have no board without EEPROM
16627 	 *  same setup as mentioned int the FreeBSD driver for the i82575
16628 	 */
16629 
16630 	/* SerDes configuration via SERDESCTRL */
16631 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
16632 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
16633 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
16634 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
16635 
16636 	/* CCM configuration via CCMCTL register */
16637 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
16638 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
16639 
16640 	/* PCIe lanes configuration */
16641 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
16642 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
16643 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
16644 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
16645 
16646 	/* PCIe PLL Configuration */
16647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
16648 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
16649 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
16650 }
16651 
16652 static void
16653 wm_reset_mdicnfg_82580(struct wm_softc *sc)
16654 {
16655 	uint32_t reg;
16656 	uint16_t nvmword;
16657 	int rv;
16658 
16659 	if (sc->sc_type != WM_T_82580)
16660 		return;
16661 	if ((sc->sc_flags & WM_F_SGMII) == 0)
16662 		return;
16663 
16664 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
16665 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
16666 	if (rv != 0) {
16667 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
16668 		    __func__);
16669 		return;
16670 	}
16671 
16672 	reg = CSR_READ(sc, WMREG_MDICNFG);
16673 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
16674 		reg |= MDICNFG_DEST;
16675 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
16676 		reg |= MDICNFG_COM_MDIO;
16677 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16678 }
16679 
16680 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
16681 
16682 static bool
16683 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
16684 {
16685 	uint32_t reg;
16686 	uint16_t id1, id2;
16687 	int i, rv;
16688 
16689 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16690 		device_xname(sc->sc_dev), __func__));
16691 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
16692 
16693 	id1 = id2 = 0xffff;
16694 	for (i = 0; i < 2; i++) {
16695 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
16696 		    &id1);
16697 		if ((rv != 0) || MII_INVALIDID(id1))
16698 			continue;
16699 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
16700 		    &id2);
16701 		if ((rv != 0) || MII_INVALIDID(id2))
16702 			continue;
16703 		break;
16704 	}
16705 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
16706 		goto out;
16707 
16708 	/*
16709 	 * In case the PHY needs to be in mdio slow mode,
16710 	 * set slow mode and try to get the PHY id again.
16711 	 */
16712 	rv = 0;
16713 	if (sc->sc_type < WM_T_PCH_LPT) {
16714 		sc->phy.release(sc);
16715 		wm_set_mdio_slow_mode_hv(sc);
16716 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
16717 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
16718 		sc->phy.acquire(sc);
16719 	}
16720 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
16721 		device_printf(sc->sc_dev, "XXX return with false\n");
16722 		return false;
16723 	}
16724 out:
16725 	if (sc->sc_type >= WM_T_PCH_LPT) {
16726 		/* Only unforce SMBus if ME is not active */
16727 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
16728 			uint16_t phyreg;
16729 
16730 			/* Unforce SMBus mode in PHY */
16731 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
16732 			    CV_SMB_CTRL, &phyreg);
16733 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
16734 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
16735 			    CV_SMB_CTRL, phyreg);
16736 
16737 			/* Unforce SMBus mode in MAC */
16738 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
16739 			reg &= ~CTRL_EXT_FORCE_SMBUS;
16740 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16741 		}
16742 	}
16743 	return true;
16744 }
16745 
16746 static void
16747 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
16748 {
16749 	uint32_t reg;
16750 	int i;
16751 
16752 	/* Set PHY Config Counter to 50msec */
16753 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
16754 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
16755 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
16756 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
16757 
16758 	/* Toggle LANPHYPC */
16759 	reg = CSR_READ(sc, WMREG_CTRL);
16760 	reg |= CTRL_LANPHYPC_OVERRIDE;
16761 	reg &= ~CTRL_LANPHYPC_VALUE;
16762 	CSR_WRITE(sc, WMREG_CTRL, reg);
16763 	CSR_WRITE_FLUSH(sc);
16764 	delay(1000);
16765 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
16766 	CSR_WRITE(sc, WMREG_CTRL, reg);
16767 	CSR_WRITE_FLUSH(sc);
16768 
16769 	if (sc->sc_type < WM_T_PCH_LPT)
16770 		delay(50 * 1000);
16771 	else {
16772 		i = 20;
16773 
16774 		do {
16775 			delay(5 * 1000);
16776 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
16777 		    && i--);
16778 
16779 		delay(30 * 1000);
16780 	}
16781 }
16782 
16783 static int
16784 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
16785 {
16786 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
16787 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
16788 	uint32_t rxa;
16789 	uint16_t scale = 0, lat_enc = 0;
16790 	int32_t obff_hwm = 0;
16791 	int64_t lat_ns, value;
16792 
16793 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16794 		device_xname(sc->sc_dev), __func__));
16795 
16796 	if (link) {
16797 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
16798 		uint32_t status;
16799 		uint16_t speed;
16800 		pcireg_t preg;
16801 
16802 		status = CSR_READ(sc, WMREG_STATUS);
16803 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
16804 		case STATUS_SPEED_10:
16805 			speed = 10;
16806 			break;
16807 		case STATUS_SPEED_100:
16808 			speed = 100;
16809 			break;
16810 		case STATUS_SPEED_1000:
16811 			speed = 1000;
16812 			break;
16813 		default:
16814 			device_printf(sc->sc_dev, "Unknown speed "
16815 			    "(status = %08x)\n", status);
16816 			return -1;
16817 		}
16818 
16819 		/* Rx Packet Buffer Allocation size (KB) */
16820 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
16821 
16822 		/*
16823 		 * Determine the maximum latency tolerated by the device.
16824 		 *
16825 		 * Per the PCIe spec, the tolerated latencies are encoded as
16826 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
16827 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
16828 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
16829 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
16830 		 */
16831 		lat_ns = ((int64_t)rxa * 1024 -
16832 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
16833 			+ ETHER_HDR_LEN))) * 8 * 1000;
16834 		if (lat_ns < 0)
16835 			lat_ns = 0;
16836 		else
16837 			lat_ns /= speed;
16838 		value = lat_ns;
16839 
16840 		while (value > LTRV_VALUE) {
16841 			scale ++;
16842 			value = howmany(value, __BIT(5));
16843 		}
16844 		if (scale > LTRV_SCALE_MAX) {
16845 			device_printf(sc->sc_dev,
16846 			    "Invalid LTR latency scale %d\n", scale);
16847 			return -1;
16848 		}
16849 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
16850 
16851 		/* Determine the maximum latency tolerated by the platform */
16852 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16853 		    WM_PCI_LTR_CAP_LPT);
16854 		max_snoop = preg & 0xffff;
16855 		max_nosnoop = preg >> 16;
16856 
16857 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
16858 
16859 		if (lat_enc > max_ltr_enc) {
16860 			lat_enc = max_ltr_enc;
16861 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
16862 			    * PCI_LTR_SCALETONS(
16863 				    __SHIFTOUT(lat_enc,
16864 					PCI_LTR_MAXSNOOPLAT_SCALE));
16865 		}
16866 
16867 		if (lat_ns) {
16868 			lat_ns *= speed * 1000;
16869 			lat_ns /= 8;
16870 			lat_ns /= 1000000000;
16871 			obff_hwm = (int32_t)(rxa - lat_ns);
16872 		}
16873 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
16874 			device_printf(sc->sc_dev, "Invalid high water mark %d"
16875 			    "(rxa = %d, lat_ns = %d)\n",
16876 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
16877 			return -1;
16878 		}
16879 	}
16880 	/* Snoop and No-Snoop latencies the same */
16881 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
16882 	CSR_WRITE(sc, WMREG_LTRV, reg);
16883 
16884 	/* Set OBFF high water mark */
16885 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
16886 	reg |= obff_hwm;
16887 	CSR_WRITE(sc, WMREG_SVT, reg);
16888 
16889 	/* Enable OBFF */
16890 	reg = CSR_READ(sc, WMREG_SVCR);
16891 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
16892 	CSR_WRITE(sc, WMREG_SVCR, reg);
16893 
16894 	return 0;
16895 }
16896 
16897 /*
16898  * I210 Errata 25 and I211 Errata 10
16899  * Slow System Clock.
16900  *
16901  * Note that this function is called on both FLASH and iNVM case on NetBSD.
16902  */
16903 static int
16904 wm_pll_workaround_i210(struct wm_softc *sc)
16905 {
16906 	uint32_t mdicnfg, wuc;
16907 	uint32_t reg;
16908 	pcireg_t pcireg;
16909 	uint32_t pmreg;
16910 	uint16_t nvmword, tmp_nvmword;
16911 	uint16_t phyval;
16912 	bool wa_done = false;
16913 	int i, rv = 0;
16914 
16915 	/* Get Power Management cap offset */
16916 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
16917 	    &pmreg, NULL) == 0)
16918 		return -1;
16919 
16920 	/* Save WUC and MDICNFG registers */
16921 	wuc = CSR_READ(sc, WMREG_WUC);
16922 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
16923 
16924 	reg = mdicnfg & ~MDICNFG_DEST;
16925 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
16926 
16927 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0) {
16928 		/*
16929 		 * The default value of the Initialization Control Word 1
16930 		 * is the same on both I210's FLASH_HW and I21[01]'s iNVM.
16931 		 */
16932 		nvmword = INVM_DEFAULT_AL;
16933 	}
16934 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
16935 
16936 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
16937 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
16938 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
16939 
16940 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
16941 			rv = 0;
16942 			break; /* OK */
16943 		} else
16944 			rv = -1;
16945 
16946 		wa_done = true;
16947 		/* Directly reset the internal PHY */
16948 		reg = CSR_READ(sc, WMREG_CTRL);
16949 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
16950 
16951 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
16952 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
16953 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
16954 
16955 		CSR_WRITE(sc, WMREG_WUC, 0);
16956 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
16957 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16958 
16959 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
16960 		    pmreg + PCI_PMCSR);
16961 		pcireg |= PCI_PMCSR_STATE_D3;
16962 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16963 		    pmreg + PCI_PMCSR, pcireg);
16964 		delay(1000);
16965 		pcireg &= ~PCI_PMCSR_STATE_D3;
16966 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
16967 		    pmreg + PCI_PMCSR, pcireg);
16968 
16969 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
16970 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
16971 
16972 		/* Restore WUC register */
16973 		CSR_WRITE(sc, WMREG_WUC, wuc);
16974 	}
16975 
16976 	/* Restore MDICNFG setting */
16977 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
16978 	if (wa_done)
16979 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
16980 	return rv;
16981 }
16982 
16983 static void
16984 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
16985 {
16986 	uint32_t reg;
16987 
16988 	DPRINTF(sc, WM_DEBUG_INIT, ("%s: %s called\n",
16989 		device_xname(sc->sc_dev), __func__));
16990 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
16991 	    || (sc->sc_type == WM_T_PCH_CNP));
16992 
16993 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
16994 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
16995 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
16996 
16997 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
16998 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
16999 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
17000 }
17001 
17002 /* Sysctl function */
17003 #ifdef WM_DEBUG
17004 static int
17005 wm_sysctl_debug(SYSCTLFN_ARGS)
17006 {
17007 	struct sysctlnode node = *rnode;
17008 	struct wm_softc *sc = (struct wm_softc *)node.sysctl_data;
17009 	uint32_t dflags;
17010 	int error;
17011 
17012 	dflags = sc->sc_debug;
17013 	node.sysctl_data = &dflags;
17014 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
17015 
17016 	if (error || newp == NULL)
17017 		return error;
17018 
17019 	sc->sc_debug = dflags;
17020 
17021 	return 0;
17022 }
17023 #endif
17024